1 /* 2 * Test some powerpc instructions 3 */ 4 5 #include <libcflat.h> 6 #include <asm/processor.h> 7 8 static int verbose; 9 static int volatile is_invalid; 10 static int volatile alignment; 11 12 static void program_check_handler(struct pt_regs *regs, void *opaque) 13 { 14 int *data = opaque; 15 16 if (verbose) { 17 printf("Detected invalid instruction %#018lx: %08x\n", 18 regs->nip, *(uint32_t*)regs->nip); 19 } 20 21 /* the result is bit 16 to 19 of SRR1 22 * bit 0: SRR0 contains the address of the next instruction 23 * bit 1: Trap 24 * bit 2: Privileged instruction 25 * bit 3: Illegal instruction 26 * bit 4: FP enabled exception type 27 */ 28 29 *data = regs->msr >> 16; 30 31 regs->nip += 4; 32 } 33 34 static void alignment_handler(struct pt_regs *regs, void *opaque) 35 { 36 int *data = opaque; 37 38 if (verbose) { 39 printf("Detected alignment exception %#018lx: %08x\n", 40 regs->nip, *(uint32_t*)regs->nip); 41 } 42 43 *data = 1; 44 45 regs->nip += 4; 46 } 47 48 static void test_illegal(void) 49 { 50 report_prefix_push("invalid"); 51 52 is_invalid = 0; 53 54 asm volatile (".long 0"); 55 56 report(is_invalid == 8, "exception"); /* illegal instruction */ 57 58 report_prefix_pop(); 59 } 60 61 static void test_64bit(void) 62 { 63 uint64_t msr; 64 65 report_prefix_push("64bit"); 66 67 asm("mfmsr %[msr]": [msr] "=r" (msr)); 68 69 report(msr & 0x8000000000000000UL, "detected"); 70 71 report_prefix_pop(); 72 } 73 74 /* 75 * Test 'Load String Word Immediate' instruction 76 */ 77 static void test_lswi(void) 78 { 79 int i; 80 char addr[128]; 81 uint64_t regs[32]; 82 83 report_prefix_push("lswi"); 84 85 /* fill memory with sequence */ 86 for (i = 0; i < 128; i++) 87 addr[i] = 1 + i; 88 89 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 90 91 /* 92 * lswi is supposed to cause an alignment exception in little endian 93 * mode, but to be able to check this, we also have to specify the 94 * opcode without mnemonic here since newer versions of GCC refuse 95 * "lswi" when compiling in little endian mode. 96 */ 97 alignment = 0; 98 asm volatile ("mr r12,%[addr];" 99 ".long 0x7d6c24aa;" /* lswi r11,r12,4 */ 100 "std r11,0(%[regs]);" 101 :: [addr] "r" (addr), [regs] "r" (regs) 102 : "r11", "r12", "memory"); 103 report(alignment, "alignment"); 104 105 #else 106 107 /* check incomplete register filling */ 108 asm volatile ("li r12,-1;" 109 "mr r11, r12;" 110 "lswi r11, %[addr], %[len];" 111 "std r11, 0*8(%[regs]);" 112 "std r12, 1*8(%[regs]);" 113 :: 114 [len] "i" (3), 115 [addr] "b" (addr), 116 [regs] "r" (regs) 117 : 118 "r11", "r12", "memory"); 119 report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial"); 120 121 /* check NB = 0 ==> 32 bytes. */ 122 asm volatile ("li r19,-1;" 123 "mr r11, r19; mr r12, r19; mr r13, r19;" 124 "mr r14, r19; mr r15, r19; mr r16, r19;" 125 "mr r17, r19; mr r18, r19;" 126 "lswi r11, %[addr], %[len];" 127 "std r11, 0*8(%[regs]);" 128 "std r12, 1*8(%[regs]);" 129 "std r13, 2*8(%[regs]);" 130 "std r14, 3*8(%[regs]);" 131 "std r15, 4*8(%[regs]);" 132 "std r16, 5*8(%[regs]);" 133 "std r17, 6*8(%[regs]);" 134 "std r18, 7*8(%[regs]);" 135 "std r19, 8*8(%[regs]);" 136 :: 137 [len] "i" (0), 138 [addr] "b" (addr), 139 [regs] "r" (regs) 140 : 141 /* as 32 is the number of bytes, 142 * we should modify 32/4 = 8 regs, from r11 to r18 143 * We check r19 is unmodified by filling it with 1s 144 * before the instruction. 145 */ 146 "r11", "r12", "r13", "r14", "r15", "r16", "r17", 147 "r18", "r19", "memory"); 148 149 report(regs[0] == 0x01020304 && regs[1] == 0x05060708 && 150 regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 && 151 regs[4] == 0x11121314 && regs[5] == 0x15161718 && 152 regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 && 153 regs[8] == (uint64_t)-1, "length"); 154 155 /* check wrap around to r0 */ 156 asm volatile ("li r31,-1;" 157 "mr r0, r31;" 158 "lswi r31, %[addr], %[len];" 159 "std r31, 0*8(%[regs]);" 160 "std r0, 1*8(%[regs]);" 161 :: 162 [len] "i" (8), 163 [addr] "b" (addr), 164 [regs] "r" (regs) 165 : 166 /* modify two registers from r31, wrap around to r0 */ 167 "r31", "r0", "memory"); 168 169 report(regs[0] == 0x01020304 && regs[1] == 0x05060708, 170 "wrap around to r0"); 171 172 /* check wrap around doesn't break RA */ 173 asm volatile ("mr r29,r1\n" 174 "li r31,-1\n" 175 "mr r0,r31\n" 176 "mr r1, %[addr]\n" 177 ".long 0x7fe154aa\n" /* lswi r31, r1, 10 */ 178 "std r31, 0*8(%[regs])\n" 179 "std r0, 1*8(%[regs])\n" 180 "std r1, 2*8(%[regs])\n" 181 "mr r1,r29\n" 182 :: 183 [addr] "r" (addr), 184 [regs] "r" (regs) 185 : 186 /* loading three registers from r31 wraps around to r1, 187 * r1 is saved to r29, as adding it to the clobber 188 * list doesn't protect it 189 */ 190 "r0", "r29", "r31", "memory"); 191 192 /* doc says it is invalid, real proc stops when it comes to 193 * overwrite the register. 194 * In all the cases, the register must stay untouched 195 */ 196 report(regs[2] == (uint64_t)addr, "Don't overwrite Ra"); 197 198 #endif 199 200 report_prefix_pop(); 201 } 202 203 /* 204 * lswx: Load String Word Indexed X-form 205 * 206 * lswx RT,RA,RB 207 * 208 * EA = (RA|0) + RB 209 * n = XER 210 * 211 * Load n bytes from address EA into (n / 4) consecutive registers, 212 * throught RT -> RT + (n / 4) - 1. 213 * - Data are loaded into 4 low order bytes of registers (Word). 214 * - The unfilled bytes are set to 0. 215 * - The sequence of registers wraps around to GPR0. 216 * - if n == 0, content of RT is undefined 217 * - RT <= RA or RB < RT + (n + 4) is invalid or result is undefined 218 * - RT == RA == 0 is invalid 219 * 220 * For lswx in little-endian mode, an alignment interrupt always occurs. 221 * 222 */ 223 224 static void test_lswx(void) 225 { 226 int i; 227 char addr[128]; 228 uint64_t regs[32]; 229 230 report_prefix_push("lswx"); 231 232 /* fill memory with sequence */ 233 for (i = 0; i < 128; i++) 234 addr[i] = 1 + i; 235 236 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 237 238 /* 239 * lswx is supposed to cause an alignment exception in little endian 240 * mode, but to be able to check this, we also have to specify the 241 * opcode without mnemonic here since newer versions of GCC refuse 242 * "lswx" when compiling in little endian mode. 243 */ 244 alignment = 0; 245 asm volatile ("mtxer %[len];" 246 "mr r11,%[addr];" 247 ".long 0x7d805c2a;" /* lswx r12,0,r11 */ 248 "std r12,0(%[regs]);" 249 :: [len]"r"(4), [addr]"r"(addr), [regs]"r"(regs) 250 : "r11", "r12", "memory"); 251 report(alignment, "alignment"); 252 253 #else 254 255 /* check incomplete register filling */ 256 asm volatile ("mtxer %[len];" 257 "li r12,-1;" 258 "mr r11, r12;" 259 "lswx r11, 0, %[addr];" 260 "std r11, 0*8(%[regs]);" 261 "std r12, 1*8(%[regs]);" 262 :: 263 [len] "r" (3), 264 [addr] "r" (addr), 265 [regs] "r" (regs) 266 : 267 "xer", "r11", "r12", "memory"); 268 report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial"); 269 270 /* check an old know bug: the number of bytes is used as 271 * the number of registers, so try 32 bytes. 272 */ 273 274 asm volatile ("mtxer %[len];" 275 "li r19,-1;" 276 "mr r11, r19; mr r12, r19; mr r13, r19;" 277 "mr r14, r19; mr r15, r19; mr r16, r19;" 278 "mr r17, r19; mr r18, r19;" 279 "lswx r11, 0, %[addr];" 280 "std r11, 0*8(%[regs]);" 281 "std r12, 1*8(%[regs]);" 282 "std r13, 2*8(%[regs]);" 283 "std r14, 3*8(%[regs]);" 284 "std r15, 4*8(%[regs]);" 285 "std r16, 5*8(%[regs]);" 286 "std r17, 6*8(%[regs]);" 287 "std r18, 7*8(%[regs]);" 288 "std r19, 8*8(%[regs]);" 289 :: 290 [len] "r" (32), 291 [addr] "r" (addr), 292 [regs] "r" (regs) 293 : 294 /* as 32 is the number of bytes, 295 * we should modify 32/4 = 8 regs, from r11 to r18 296 * We check r19 is unmodified by filling it with 1s 297 * before the instruction. 298 */ 299 "xer", "r11", "r12", "r13", "r14", "r15", "r16", "r17", 300 "r18", "r19", "memory"); 301 302 report(regs[0] == 0x01020304 && regs[1] == 0x05060708 && 303 regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 && 304 regs[4] == 0x11121314 && regs[5] == 0x15161718 && 305 regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 && 306 regs[8] == (uint64_t)-1, "length"); 307 308 /* check wrap around to r0 */ 309 310 asm volatile ("mtxer %[len];" 311 "li r31,-1;" 312 "mr r0, r31;" 313 "lswx r31, 0, %[addr];" 314 "std r31, 0*8(%[regs]);" 315 "std r0, 1*8(%[regs]);" 316 :: 317 [len] "r" (8), 318 [addr] "r" (addr), 319 [regs] "r" (regs) 320 : 321 /* modify two registers from r31, wrap around to r0 */ 322 "xer", "r31", "r0", "memory"); 323 324 report(regs[0] == 0x01020304 && regs[1] == 0x05060708, 325 "wrap around to r0"); 326 327 /* check wrap around to r0 over RB doesn't break RB */ 328 329 asm volatile ("mtxer %[len];" 330 "mr r29,r1;" 331 "li r31,-1;" 332 "mr r1,r31;" 333 "mr r0, %[addr];" 334 "lswx r31, 0, r0;" 335 "std r31, 0*8(%[regs]);" 336 "std r0, 1*8(%[regs]);" 337 "std r1, 2*8(%[regs]);" 338 "mr r1,r29;" 339 :: 340 [len] "r" (12), 341 [addr] "r" (addr), 342 [regs] "r" (regs) 343 : 344 /* loading three registers from r31 wraps around to r1, 345 * r1 is saved to r29, as adding it to the clobber 346 * list doesn't protect it 347 */ 348 "xer", "r31", "r0", "r29", "memory"); 349 350 /* doc says it is invalid, real proc stops when it comes to 351 * overwrite the register. 352 * In all the cases, the register must stay untouched 353 */ 354 report(regs[1] == (uint64_t)addr, "Don't overwrite Rb"); 355 356 #endif 357 358 report_prefix_pop(); 359 } 360 361 int main(int argc, char **argv) 362 { 363 int i; 364 365 handle_exception(0x700, program_check_handler, (void *)&is_invalid); 366 handle_exception(0x600, alignment_handler, (void *)&alignment); 367 368 for (i = 1; i < argc; i++) { 369 if (strcmp(argv[i], "-v") == 0) { 370 verbose = 1; 371 } 372 } 373 374 report_prefix_push("emulator"); 375 376 test_64bit(); 377 test_illegal(); 378 test_lswx(); 379 test_lswi(); 380 381 report_prefix_pop(); 382 383 return report_summary(); 384 } 385