/*
 * powerpc/emulator.c: Test some powerpc instructions
 */

#include <libcflat.h>
#include <asm/processor.h>

static int verbose;
static int volatile is_invalid;
static int volatile alignment;

static void program_check_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected invalid instruction %#018lx: %08x\n",
		       regs->nip, *(uint32_t*)regs->nip);
	}

	/*
	 * The cause is encoded in bits 16 to 20 of SRR1:
	 * bit 0: SRR0 contains the address of the next instruction
	 * bit 1: Trap
	 * bit 2: Privileged instruction
	 * bit 3: Illegal instruction
	 * bit 4: FP enabled exception type
	 */

	*data = regs->msr >> 16;

	regs->nip += 4;
}

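/*
 * On CPUs with HEAI, an illegal instruction raises a Hypervisor Emulation
 * Assistance interrupt (vector 0xe40) rather than a program check, so this
 * handler is installed alongside program_check_handler when cpu_has_heai is
 * set in main().
 */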
static void heai_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected invalid instruction %#018lx: %08x\n",
		       regs->nip, *(uint32_t*)regs->nip);
	}

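	/* Report the same "illegal instruction" code (bit 3, value 8) that
	 * program_check_handler would extract, so test_illegal() can check a
	 * single value regardless of which interrupt was taken. */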
	*data = 8; /* Illegal instruction */

	regs->nip += 4;
}

static void alignment_handler(struct pt_regs *regs, void *opaque)
{
	int *data = opaque;

	if (verbose) {
		printf("Detected alignment exception %#018lx: %08x\n",
		       regs->nip, *(uint32_t*)regs->nip);
	}

	*data = 1;

	regs->nip += 4;
}

static void test_illegal(void)
{
	report_prefix_push("invalid");

	is_invalid = 0;

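	/* The all-zero instruction word is not a valid encoding, so executing
	 * it must take the illegal-instruction path. */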
	asm volatile (".long 0");

	report(is_invalid == 8, "exception"); /* illegal instruction */

	report_prefix_pop();
}

static void test_64bit(void)
{
	uint64_t msr;

	report_prefix_push("64bit");

	asm("mfmsr %[msr]": [msr] "=r" (msr));

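	/* MSR[SF], the most significant bit, is set when running in 64-bit mode. */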
	report(msr & 0x8000000000000000UL, "detected");

	report_prefix_pop();
}

/*
 * Test 'Load String Word Immediate' instruction
 */
static void test_lswi(void)
{
	int i;
	char addr[128];
	uint64_t regs[32];

	report_prefix_push("lswi");

	/* fill memory with sequence */
	for (i = 0; i < 128; i++)
		addr[i] = 1 + i;

#if  __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

	/*
	 * lswi is supposed to cause an alignment exception in little endian
	 * mode, but to be able to check this, we also have to specify the
	 * opcode without mnemonic here since newer versions of GCC refuse
	 * "lswi" when compiling in little endian mode.
	 */
	alignment = 0;
	asm volatile ("mr r12,%[addr];"
		      ".long 0x7d6c24aa;"       /* lswi r11,r12,4 */
		      "std r11,0(%[regs]);"
		       :: [addr] "r" (addr), [regs] "r" (regs)
		       : "r11", "r12", "memory");
	report(alignment, "alignment");

#else

	/* check incomplete register filling */
	asm volatile ("li r12,-1;"
		      "mr r11, r12;"
		      "lswi r11, %[addr], %[len];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      ::
		      [len] "i" (3),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      "r11", "r12", "memory");
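	/* Three bytes fill only part of r11: the fourth byte of the word is
	 * zeroed, and the next register (r12) must keep its -1 value. */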
	report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial");

	/* check NB = 0 ==> 32 bytes. */
	asm volatile ("li r19,-1;"
		      "mr r11, r19; mr r12, r19; mr r13, r19;"
		      "mr r14, r19; mr r15, r19; mr r16, r19;"
		      "mr r17, r19; mr r18, r19;"
		      "lswi r11, %[addr], %[len];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      "std r13, 2*8(%[regs]);"
		      "std r14, 3*8(%[regs]);"
		      "std r15, 4*8(%[regs]);"
		      "std r16, 5*8(%[regs]);"
		      "std r17, 6*8(%[regs]);"
		      "std r18, 7*8(%[regs]);"
		      "std r19, 8*8(%[regs]);"
		      ::
		      [len] "i" (0),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      /* as 32 is the number of bytes,
		       * we should modify 32/4 = 8 regs, from r11 to r18.
		       * We check r19 is unmodified by filling it with 1s
		       * before the instruction.
		       */
		      "r11", "r12", "r13", "r14", "r15", "r16", "r17",
		      "r18", "r19", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
	       regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
	       regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
	       regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
	       regs[8] == (uint64_t)-1, "length");

	/* check wrap around to r0 */
	asm volatile ("li r31,-1;"
		      "mr r0, r31;"
		      "lswi r31, %[addr], %[len];"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      ::
		      [len] "i" (8),
		      [addr] "b" (addr),
		      [regs] "r" (regs)
		      :
		      /* modify two registers from r31, wrap around to r0 */
		      "r31", "r0", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708,
	       "wrap around to r0");

	/* check wrap around doesn't break RA */
	asm volatile ("mr r29,r1\n"
		      "li r31,-1\n"
		      "mr r0,r31\n"
		      "mr r1, %[addr]\n"
		      ".long 0x7fe154aa\n"       /* lswi r31, r1, 10 */
		      "std r31, 0*8(%[regs])\n"
		      "std r0, 1*8(%[regs])\n"
		      "std r1, 2*8(%[regs])\n"
		      "mr r1,r29\n"
		      ::
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* loading three registers from r31 wraps around to r1;
		       * r1 is saved to r29, as adding it to the clobber
		       * list doesn't protect it
		       */
		      "r0", "r29", "r31", "memory");

	/* The documentation says this case is invalid; a real processor stops
	 * before overwriting the register. Either way, RA must stay
	 * untouched.
	 */
	report(regs[2] == (uint64_t)addr, "Don't overwrite Ra");

#endif

	report_prefix_pop();
}

/*
 * lswx: Load String Word Indexed X-form
 *
 *     lswx RT,RA,RB
 *
 * EA = (RA|0) + RB
 * n  = XER string count (low-order bits of XER)
 *
 * Load n bytes from address EA into ceil(n / 4) consecutive registers,
 * from RT through RT + ceil(n / 4) - 1.
 * - Data are loaded into the 4 low-order bytes of each register (Word).
 * - The unfilled bytes are set to 0.
 * - The sequence of registers wraps around to GPR0.
 * - If n == 0, the content of RT is undefined.
 * - If RA or RB is in the range of registers to be loaded, the instruction
 *   form is invalid (the result is undefined).
 * - RT == RA == 0 is invalid.
 *
 * For lswx in little-endian mode, an alignment interrupt always occurs.
 *
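 * For example, with n = 6 and the buffer holding bytes 0x01..0x06, RT ends
 * up with 0x01020304 and RT+1 with 0x05060000 (the "partial" test below
 * checks the 3-byte case).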
 */

static void test_lswx(void)
{
	int i;
	char addr[128];
	uint64_t regs[32];

	report_prefix_push("lswx");

	/* fill memory with sequence */
	for (i = 0; i < 128; i++)
		addr[i] = 1 + i;

#if  __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__

	/*
	 * lswx is supposed to cause an alignment exception in little endian
	 * mode, but to be able to check this, we also have to specify the
	 * opcode without mnemonic here since newer versions of GCC refuse
	 * "lswx" when compiling in little endian mode.
	 */
	alignment = 0;
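	/* The byte count for lswx is taken from XER, so load it with mtxer
	 * before issuing the instruction. */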
	asm volatile ("mtxer %[len];"
		      "mr r11,%[addr];"
		      ".long 0x7d805c2a;"       /* lswx r12,0,r11 */
		      "std r12,0(%[regs]);"
		      :: [len]"r"(4), [addr]"r"(addr), [regs]"r"(regs)
		      : "r11", "r12", "memory");
	report(alignment, "alignment");

#else

	/* check incomplete register filling */
	asm volatile ("mtxer %[len];"
		      "li r12,-1;"
		      "mr r11, r12;"
		      "lswx r11, 0, %[addr];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      ::
		      [len] "r" (3),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      "xer", "r11", "r12", "memory");
	report(regs[0] == 0x01020300 && regs[1] == (uint64_t)-1, "partial");

	/* check an old known bug: the number of bytes is used as
	 * the number of registers, so try 32 bytes.
	 */

	asm volatile ("mtxer %[len];"
		      "li r19,-1;"
		      "mr r11, r19; mr r12, r19; mr r13, r19;"
		      "mr r14, r19; mr r15, r19; mr r16, r19;"
		      "mr r17, r19; mr r18, r19;"
		      "lswx r11, 0, %[addr];"
		      "std r11, 0*8(%[regs]);"
		      "std r12, 1*8(%[regs]);"
		      "std r13, 2*8(%[regs]);"
		      "std r14, 3*8(%[regs]);"
		      "std r15, 4*8(%[regs]);"
		      "std r16, 5*8(%[regs]);"
		      "std r17, 6*8(%[regs]);"
		      "std r18, 7*8(%[regs]);"
		      "std r19, 8*8(%[regs]);"
		      ::
		      [len] "r" (32),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* as 32 is the number of bytes,
		       * we should modify 32/4 = 8 regs, from r11 to r18.
		       * We check r19 is unmodified by filling it with 1s
		       * before the instruction.
		       */
		      "xer", "r11", "r12", "r13", "r14", "r15", "r16", "r17",
		      "r18", "r19", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
	       regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
	       regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
	       regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
	       regs[8] == (uint64_t)-1, "length");

	/* check wrap around to r0 */

	asm volatile ("mtxer %[len];"
		      "li r31,-1;"
		      "mr r0, r31;"
		      "lswx r31, 0, %[addr];"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      ::
		      [len] "r" (8),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* modify two registers from r31, wrap around to r0 */
		      "xer", "r31", "r0", "memory");

	report(regs[0] == 0x01020304 && regs[1] == 0x05060708,
	       "wrap around to r0");

	/* check wrap around to r0 over RB doesn't break RB */

	asm volatile ("mtxer %[len];"
		      "mr r29,r1;"
		      "li r31,-1;"
		      "mr r1,r31;"
		      "mr r0, %[addr];"
		      "lswx r31, 0, r0;"
		      "std r31, 0*8(%[regs]);"
		      "std r0, 1*8(%[regs]);"
		      "std r1, 2*8(%[regs]);"
		      "mr r1,r29;"
		      ::
		      [len] "r" (12),
		      [addr] "r" (addr),
		      [regs] "r" (regs)
		      :
		      /* loading three registers from r31 wraps around to r1;
		       * r1 is saved to r29, as adding it to the clobber
		       * list doesn't protect it
		       */
		      "xer", "r31", "r0", "r29", "memory");

	/* The documentation says this case is invalid; a real processor stops
	 * before overwriting the register. Either way, RB must stay
	 * untouched.
	 */
	report(regs[1] == (uint64_t)addr, "Don't overwrite Rb");

#endif

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	int i;

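	/* Install the interrupt handlers used by the tests: 0x700 program
	 * check, 0xe40 HEAI (only on CPUs that have it) and 0x600 alignment. */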
	handle_exception(0x700, program_check_handler, (void *)&is_invalid);
	if (cpu_has_heai)
		handle_exception(0xe40, heai_handler, (void *)&is_invalid);
	handle_exception(0x600, alignment_handler, (void *)&alignment);

	for (i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-v") == 0) {
			verbose = 1;
		}
	}

	report_prefix_push("emulator");

	test_64bit();
	test_illegal();
	test_lswx();
	test_lswi();

	report_prefix_pop();

	return report_summary();
}