/*
 * Test some powerpc instructions
 */

#include <libcflat.h>
#include <asm/processor.h>

static int verbose;
static int volatile is_invalid;
static int volatile alignment;

static void program_check_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected invalid instruction %#018lx: %08x\n",
		       regs->nip, *(uint32_t *)regs->nip);
	}

	/* the result is in bits 16 to 20 of SRR1 (counting from the LSB);
	 * after the shift below:
	 * bit 0: SRR0 contains the address of the next instruction
	 * bit 1: Trap
	 * bit 2: Privileged instruction
	 * bit 3: Illegal instruction
	 * bit 4: FP enabled exception type
	 */

	*data = regs->msr >> 16;

	regs->nip += 4;
}

static void heai_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected invalid instruction %#018lx: %08x\n",
		       regs->nip, *(uint32_t *)regs->nip);
	}

	*data = 8;	/* Illegal instruction */

	regs->nip += 4;
}

static void alignment_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected alignment exception %#018lx: %08x\n",
		       regs->nip, *(uint32_t *)regs->nip);
	}

	*data = 1;

	regs->nip += 4;
}

static void test_illegal(void)
{
	report_prefix_push("invalid");

	is_invalid = 0;

	asm volatile (".long 0");

	report(is_invalid == 8, "exception");	/* illegal instruction */

	report_prefix_pop();
}

static void test_64bit(void)
{
	uint64_t msr;

	report_prefix_push("64bit");

	/* MSR bit 0 (the MSB, SF) is set when the CPU runs in 64-bit mode */
	asm("mfmsr %[msr]": [msr] "=r" (msr));

	report(msr & 0x8000000000000000UL, "detected");

	report_prefix_pop();
}

/*
 * Test 'Load String Word Immediate' instruction
 */
static void test_lswi(void)
{
	int i;
	char addr[128];
	uint64_t regs[32];

	report_prefix_push("lswi");

	/* fill memory with sequence */
	for (i = 0; i < 128; i++)
		addr[i] = 1 + i;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

	/*
	 * lswi is supposed to cause an alignment exception in little endian
	 * mode, but to be able to check this, we also have to specify the
	 * opcode without mnemonic here since newer versions of GCC refuse
	 * "lswi" when compiling in little endian mode.
	 */
	alignment = 0;
	asm volatile ("mr r12,%[addr];"
		      ".long 0x7d6c24aa;"	/* lswi r11,r12,4 */
		      "std r11,0(%[regs]);"
		      :: [addr] "r" (addr), [regs] "r" (regs)
		      : "r11", "r12", "memory");
	report(alignment, "alignment");

#else
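	/*
	 * The big-endian checks below rely on the architected lswi
	 * behaviour: NB bytes (NB = 0 meaning 32) are loaded from the
	 * address in RA into consecutive registers starting at RT,
	 * 4 bytes per register with the first byte placed in the
	 * most-significant byte of the word; a partially filled last
	 * word is padded with zeros, and the register sequence wraps
	 * from r31 back to r0.
	 */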
	/* check incomplete register filling */
	asm volatile ("li r12,-1;"
		      "mr r11, r12;"
		      "lswi r11, %[addr], %[len];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      ::
		      [len] "i" (3),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      "r11", "r12", "memory");
	report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial");

	/* check NB = 0 ==> 32 bytes */
	asm volatile ("li r19,-1;"
		      "mr r11, r19; mr r12, r19; mr r13, r19;"
		      "mr r14, r19; mr r15, r19; mr r16, r19;"
		      "mr r17, r19; mr r18, r19;"
		      "lswi r11, %[addr], %[len];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      "std r13, 2*8(%[regs]);"
		      "std r14, 3*8(%[regs]);"
		      "std r15, 4*8(%[regs]);"
		      "std r16, 5*8(%[regs]);"
		      "std r17, 6*8(%[regs]);"
		      "std r18, 7*8(%[regs]);"
		      "std r19, 8*8(%[regs]);"
		      ::
		      [len] "i" (0),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      /* as 32 is the number of bytes, 32/4 = 8 registers
		       * (r11 to r18) should be modified. r19 is preloaded
		       * with all ones so we can check it stays unmodified.
		       */
		      "r11", "r12", "r13", "r14", "r15", "r16", "r17",
		      "r18", "r19", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
	       regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
	       regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
	       regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
	       regs[8] == (uint64_t)-1, "length");

	/* check wrap around to r0 */
	asm volatile ("li r31,-1;"
		      "mr r0, r31;"
		      "lswi r31, %[addr], %[len];"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      ::
		      [len] "i" (8),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      /* modify two registers starting at r31, wrapping
		       * around to r0 */
		      "r31", "r0", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708,
	       "wrap around to r0");

	/* check wrap around doesn't break RA */
	asm volatile ("mr r29,r1\n"
		      "li r31,-1\n"
		      "mr r0,r31\n"
		      "mr r1, %[addr]\n"
		      ".long 0x7fe154aa\n"	/* lswi r31, r1, 10 */
		      "std r31, 0*8(%[regs])\n"
		      "std r0, 1*8(%[regs])\n"
		      "std r1, 2*8(%[regs])\n"
		      "mr r1,r29\n"
		      ::
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* loading three registers starting at r31 wraps
		       * around to r1. r1 is saved to r29 by hand, as
		       * adding it to the clobber list doesn't protect it.
		       */
		      "r0", "r29", "r31", "memory");

	/* the documentation says this form is invalid; real processors
	 * stop when they would overwrite the register. In all cases,
	 * the register must stay untouched.
	 */
	report(regs[2] == (uint64_t)addr, "Don't overwrite Ra");

#endif

	report_prefix_pop();
}

/*
 * lswx: Load String Word Indexed X-form
 *
 *     lswx RT,RA,RB
 *
 * EA = (RA|0) + RB
 * n  = XER (string byte count)
 *
 * Load n bytes from address EA into ceil(n / 4) consecutive registers,
 * RT through RT + ceil(n / 4) - 1.
 * - Data are loaded into the 4 low-order bytes of each register (word).
 * - The unfilled bytes are set to 0.
 * - The sequence of registers wraps around to GPR0.
 * - If n == 0, the content of RT is undefined.
 * - A form with RA or RB inside the range of registers to be loaded
 *   is invalid, or its result is undefined.
 * - RT == RA == 0 is invalid.
 *
 * For lswx in little-endian mode, an alignment interrupt always occurs.
 */
static void test_lswx(void)
{
	int i;
	char addr[128];
	uint64_t regs[32];

	report_prefix_push("lswx");

	/* fill memory with sequence */
	for (i = 0; i < 128; i++)
		addr[i] = 1 + i;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

	/*
	 * lswx is supposed to cause an alignment exception in little endian
	 * mode, but to be able to check this, we also have to specify the
	 * opcode without mnemonic here since newer versions of GCC refuse
	 * "lswx" when compiling in little endian mode.
	 */
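	/*
	 * Decoding note: 0x7d805c2a is X-form primary opcode 31 with
	 * extended opcode 533 (lswx) and the fields RT = 12, RA = 0,
	 * RB = 11, i.e. "lswx r12,0,r11".
	 */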
	alignment = 0;
	asm volatile ("mtxer %[len];"
		      "mr r11,%[addr];"
		      ".long 0x7d805c2a;"	/* lswx r12,0,r11 */
		      "std r12,0(%[regs]);"
		      :: [len] "r" (4), [addr] "r" (addr), [regs] "r" (regs)
		      : "r11", "r12", "memory");
	report(alignment, "alignment");

#else
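	/*
	 * lswx takes its byte count from the low-order seven bits of
	 * XER (bits 57:63 in ISA numbering), so each check below loads
	 * the count with mtxer first.
	 */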
	/* check incomplete register filling */
	asm volatile ("mtxer %[len];"
		      "li r12,-1;"
		      "mr r11, r12;"
		      "lswx r11, 0, %[addr];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      ::
		      [len] "r" (3),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      "xer", "r11", "r12", "memory");
	report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial");

	/* check an old known bug: the number of bytes was used as
	 * the number of registers, so try 32 bytes.
	 */
	asm volatile ("mtxer %[len];"
		      "li r19,-1;"
		      "mr r11, r19; mr r12, r19; mr r13, r19;"
		      "mr r14, r19; mr r15, r19; mr r16, r19;"
		      "mr r17, r19; mr r18, r19;"
		      "lswx r11, 0, %[addr];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      "std r13, 2*8(%[regs]);"
		      "std r14, 3*8(%[regs]);"
		      "std r15, 4*8(%[regs]);"
		      "std r16, 5*8(%[regs]);"
		      "std r17, 6*8(%[regs]);"
		      "std r18, 7*8(%[regs]);"
		      "std r19, 8*8(%[regs]);"
		      ::
		      [len] "r" (32),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* as 32 is the number of bytes, 32/4 = 8 registers
		       * (r11 to r18) should be modified. r19 is preloaded
		       * with all ones so we can check it stays unmodified.
		       */
		      "xer", "r11", "r12", "r13", "r14", "r15", "r16", "r17",
		      "r18", "r19", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
	       regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
	       regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
	       regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
	       regs[8] == (uint64_t)-1, "length");

	/* check wrap around to r0 */
	asm volatile ("mtxer %[len];"
		      "li r31,-1;"
		      "mr r0, r31;"
		      "lswx r31, 0, %[addr];"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      ::
		      [len] "r" (8),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* modify two registers starting at r31, wrapping
		       * around to r0 */
		      "xer", "r31", "r0", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708,
	       "wrap around to r0");

	/* check wrap around to r0 over RB doesn't break RB */
	asm volatile ("mtxer %[len];"
		      "mr r29,r1;"
		      "li r31,-1;"
		      "mr r1,r31;"
		      "mr r0, %[addr];"
		      "lswx r31, 0, r0;"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      "std r1, 2*8(%[regs]);"
		      "mr r1,r29;"
		      ::
		      [len] "r" (12),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* loading three registers starting at r31 wraps
		       * around to r1. r1 is saved to r29 by hand, as
		       * adding it to the clobber list doesn't protect it.
		       */
		      "xer", "r31", "r0", "r29", "memory");

	/* the documentation says this form is invalid; real processors
	 * stop when they would overwrite the register. In all cases,
	 * the register must stay untouched.
	 */
	report(regs[1] == (uint64_t)addr, "Don't overwrite Rb");

#endif

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	int i;

	handle_exception(0x700, program_check_handler, (void *)&is_invalid);
	if (cpu_has_heai)
		handle_exception(0xe40, heai_handler, (void *)&is_invalid);
	handle_exception(0x600, alignment_handler, (void *)&alignment);

	for (i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-v") == 0) {
			verbose = 1;
		}
	}

	report_prefix_push("emulator");

	test_64bit();
	test_illegal();
	test_lswx();
	test_lswi();

	report_prefix_pop();

	return report_summary();
}