/*
 * powerpc/emulator.c - Test some powerpc instructions
 */

#include <libcflat.h>
#include <asm/processor.h>

static int verbose;
static int volatile is_invalid;
static int volatile alignment;

static void program_check_handler(struct pt_regs *regs, void *opaque)
{
        int *data = opaque;

        if (verbose) {
                printf("Detected invalid instruction %#018lx: %08x\n",
                       regs->nip, *(uint32_t*)regs->nip);
        }

        /* The interrupt reason is encoded in bits 16 to 20 of SRR1.
         * In the value stored below (regs->msr >> 16):
         * bit 0: SRR0 contains the address of the next instruction
         * bit 1: Trap
         * bit 2: Privileged instruction
         * bit 3: Illegal instruction
         * bit 4: FP enabled exception type
         */
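        /* For example, an illegal instruction sets bit 3, so the handler
         * stores 8; test_illegal() below checks for that value.
         */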

        *data = regs->msr >> 16;

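        /* step over the faulting instruction (all instructions are 4 bytes) */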
        regs->nip += 4;
}

static void alignment_handler(struct pt_regs *regs, void *opaque)
{
        int *data = opaque;

        if (verbose) {
                printf("Detected alignment exception %#018lx: %08x\n",
                       regs->nip, *(uint32_t*)regs->nip);
        }

        *data = 1;

        regs->nip += 4;
}

static void test_illegal(void)
{
        report_prefix_push("invalid");

        is_invalid = 0;

        asm volatile (".long 0");

        report("exception", is_invalid == 8); /* illegal instruction */

        report_prefix_pop();
}

static void test_64bit(void)
{
        uint64_t msr;

        report_prefix_push("64bit");

        asm("mfmsr %[msr]": [msr] "=r" (msr));

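        /* MSR[SF] is the most-significant MSR bit (0x8000000000000000); it is
         * set when the CPU runs in 64-bit mode.
         */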
        report("detected", msr & 0x8000000000000000UL);

        report_prefix_pop();
}

/**
 * Test the 'Load String Word Immediate' (lswi) instruction
 */
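/*
 * Quick reference, summarized from the ISA (see also the fuller lswx notes
 * further down): lswi RT,RA,NB loads NB bytes (NB = 0 means 32) from
 * EA = (RA|0) into consecutive registers starting at RT, 4 bytes per
 * register, wrapping from r31 around to r0; unfilled low-order bytes of the
 * last register are set to 0.
 */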
static void test_lswi(void)
{
        int i;
        char addr[128];
        uint64_t regs[32];

        report_prefix_push("lswi");

        /* fill memory with a known sequence */
        for (i = 0; i < 128; i++)
                addr[i] = 1 + i;

        /* check incomplete register filling */
        alignment = 0;
        asm volatile ("li r12,-1;"
                      "mr r11, r12;"
                      "lswi r11, %[addr], %[len];"
                      "std r11, 0*8(%[regs]);"
                      "std r12, 1*8(%[regs]);"
                      ::
                      [len] "i" (3),
                      [addr] "b" (addr),
                      [regs] "r" (regs)
                      :
                      "r11", "r12", "memory");

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        /*
         * lswi is supposed to cause an alignment exception in little-endian
         * mode, but QEMU does not support it. So if we do not get an
         * exception, report an expected failure and run the other tests anyway.
         */
        report_xfail("alignment", !alignment, alignment);
        if (alignment) {
                report_prefix_pop();
                return;
        }
#endif
        report("partial", regs[0] == 0x01020300 && regs[1] == (uint64_t)-1);

        /* check that NB = 0 means 32 bytes */
        asm volatile ("li r19,-1;"
                      "mr r11, r19; mr r12, r19; mr r13, r19;"
                      "mr r14, r19; mr r15, r19; mr r16, r19;"
                      "mr r17, r19; mr r18, r19;"
                      "lswi r11, %[addr], %[len];"
                      "std r11, 0*8(%[regs]);"
                      "std r12, 1*8(%[regs]);"
                      "std r13, 2*8(%[regs]);"
                      "std r14, 3*8(%[regs]);"
                      "std r15, 4*8(%[regs]);"
                      "std r16, 5*8(%[regs]);"
                      "std r17, 6*8(%[regs]);"
                      "std r18, 7*8(%[regs]);"
                      "std r19, 8*8(%[regs]);"
                      ::
                      [len] "i" (0),
                      [addr] "b" (addr),
                      [regs] "r" (regs)
                      :
                      /* As 32 is the number of bytes, the instruction should
                       * modify 32/4 = 8 registers, r11 through r18. We check
                       * that r19 is unmodified by filling it with 1s
                       * beforehand.
                       */
                      "r11", "r12", "r13", "r14", "r15", "r16", "r17",
                      "r18", "r19", "memory");

        report("length", regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
                         regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
                         regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
                         regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
                         regs[8] == (uint64_t)-1);

        /* check wrap around to r0 */
        asm volatile ("li r31,-1;"
                      "mr r0, r31;"
                      "lswi r31, %[addr], %[len];"
                      "std r31, 0*8(%[regs]);"
                      "std r0, 1*8(%[regs]);"
                      ::
                      [len] "i" (8),
                      [addr] "b" (addr),
                      [regs] "r" (regs)
                      :
                      /* modify two registers starting at r31, wrapping around to r0 */
                      "r31", "r0", "memory");

        report("wrap around to r0", regs[0] == 0x01020304 &&
                                    regs[1] == 0x05060708);

        /* check that wrap around doesn't break RA */
        asm volatile ("mr r29,r1\n"
                      "li r31,-1\n"
                      "mr r0,r31\n"
                      "mr r1, %[addr]\n"
                      ".long 0x7fe154aa\n"       /* lswi r31, r1, 10 */
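                      /* The .long above hand-encodes "lswi r31, r1, 10"
                       * (opcode 31, XO 597, RT=31, RA=1, NB=10): the form is
                       * architecturally invalid because RA falls in the loaded
                       * range, so the assembler may refuse to emit it directly.
                       */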
                      "std r31, 0*8(%[regs])\n"
                      "std r0, 1*8(%[regs])\n"
                      "std r1, 2*8(%[regs])\n"
                      "mr r1,r29\n"
                      ::
                      [addr] "r" (addr),
                      [regs] "r" (regs)
                      :
                      /* Loading three registers starting at r31 wraps around
                       * to r1. r1 is saved in r29 beforehand, as adding it to
                       * the clobber list does not protect it.
                       */
                      "r0", "r29", "r31", "memory");

        /* The documentation says this form is invalid; a real processor stops
         * when it is about to overwrite the register. In all cases, the
         * register must stay untouched.
         */
        report("Don't overwrite Ra", regs[2] == (uint64_t)addr);

        report_prefix_pop();
}

/*
 * lswx: Load String Word Indexed X-form
 *
 *     lswx RT,RA,RB
 *
 * EA = (RA|0) + RB
 * n  = XER (the byte count held in XER's low-order bits)
 *
 * Load n bytes from address EA into (n / 4) consecutive registers,
 * RT through RT + (n / 4) - 1.
 * - Data are loaded into the 4 low-order bytes of each register (word).
 * - The unfilled bytes are set to 0.
 * - The sequence of registers wraps around to GPR0.
 * - If n == 0, the content of RT is undefined.
 * - If RA or RB is in the range of registers to be loaded
 *   (RT ... RT + (n / 4) - 1), the form is invalid or the result is undefined.
 * - RT == RA == 0 is invalid.
 *
 * For lswx in little-endian mode, an alignment interrupt always occurs.
 */
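
/*
 * Worked example (big-endian): with XER holding a byte count of 3 and memory
 * starting 0x01 0x02 0x03, lswx loads RT = 0x01020300 and leaves the next
 * register untouched; the "partial" check below verifies exactly this.
 */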

static void test_lswx(void)
{
        int i;
        char addr[128];
        uint64_t regs[32];

        report_prefix_push("lswx");

        /* fill memory with a known sequence */

        for (i = 0; i < 128; i++)
                addr[i] = 1 + i;

        /* check incomplete register filling */

        alignment = 0;
        asm volatile ("mtxer %[len];"
                      "li r12,-1;"
                      "mr r11, r12;"
                      "lswx r11, 0, %[addr];"
                      "std r11, 0*8(%[regs]);"
                      "std r12, 1*8(%[regs]);"
                      ::
                      [len] "r" (3),
                      [addr] "r" (addr),
                      [regs] "r" (regs)
                      :
                      "xer", "r11", "r12", "memory");

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        /*
         * lswx is supposed to cause an alignment exception in little-endian
         * mode, but QEMU does not support it. So if we do not get an
         * exception, report an expected failure and run the other tests anyway.
         */
        report_xfail("alignment", !alignment, alignment);
        if (alignment) {
                report_prefix_pop();
                return;
        }
#endif
        report("partial", regs[0] == 0x01020300 && regs[1] == (uint64_t)-1);

        /* check an old known bug: the number of bytes was used as
         * the number of registers, so try 32 bytes.
         */

        asm volatile ("mtxer %[len];"
                      "li r19,-1;"
                      "mr r11, r19; mr r12, r19; mr r13, r19;"
                      "mr r14, r19; mr r15, r19; mr r16, r19;"
                      "mr r17, r19; mr r18, r19;"
                      "lswx r11, 0, %[addr];"
                      "std r11, 0*8(%[regs]);"
                      "std r12, 1*8(%[regs]);"
                      "std r13, 2*8(%[regs]);"
                      "std r14, 3*8(%[regs]);"
                      "std r15, 4*8(%[regs]);"
                      "std r16, 5*8(%[regs]);"
                      "std r17, 6*8(%[regs]);"
                      "std r18, 7*8(%[regs]);"
                      "std r19, 8*8(%[regs]);"
                      ::
                      [len] "r" (32),
                      [addr] "r" (addr),
                      [regs] "r" (regs)
                      :
                      /* As 32 is the number of bytes, the instruction should
                       * modify 32/4 = 8 registers, r11 through r18. We check
                       * that r19 is unmodified by filling it with 1s
                       * beforehand.
                       */
                      "xer", "r11", "r12", "r13", "r14", "r15", "r16", "r17",
                      "r18", "r19", "memory");

        report("length", regs[0] == 0x01020304 && regs[1] == 0x05060708 &&
                         regs[2] == 0x090a0b0c && regs[3] == 0x0d0e0f10 &&
                         regs[4] == 0x11121314 && regs[5] == 0x15161718 &&
                         regs[6] == 0x191a1b1c && regs[7] == 0x1d1e1f20 &&
                         regs[8] == (uint64_t)-1);

        /* check wrap around to r0 */

        asm volatile ("mtxer %[len];"
                      "li r31,-1;"
                      "mr r0, r31;"
                      "lswx r31, 0, %[addr];"
                      "std r31, 0*8(%[regs]);"
                      "std r0, 1*8(%[regs]);"
                      ::
                      [len] "r" (8),
                      [addr] "r" (addr),
                      [regs] "r" (regs)
                      :
                      /* modify two registers starting at r31, wrapping around to r0 */
                      "xer", "r31", "r0", "memory");

        report("wrap around to r0", regs[0] == 0x01020304 &&
                                    regs[1] == 0x05060708);

        /* check that wrap around to r0 over RB doesn't break RB */

        asm volatile ("mtxer %[len];"
                      "mr r29,r1;"
                      "li r31,-1;"
                      "mr r1,r31;"
                      "mr r0, %[addr];"
                      "lswx r31, 0, r0;"
                      "std r31, 0*8(%[regs]);"
                      "std r0, 1*8(%[regs]);"
                      "std r1, 2*8(%[regs]);"
                      "mr r1,r29;"
                      ::
                      [len] "r" (12),
                      [addr] "r" (addr),
                      [regs] "r" (regs)
                      :
                      /* Loading three registers starting at r31 wraps around
                       * to r1. r1 is saved in r29 beforehand, as adding it to
                       * the clobber list does not protect it.
                       */
                      "xer", "r31", "r0", "r29", "memory");

        /* The documentation says this form is invalid; a real processor stops
         * when it is about to overwrite the register. In all cases, the
         * register must stay untouched.
         */
        report("Don't overwrite Rb", regs[1] == (uint64_t)addr);

        report_prefix_pop();
}

int main(int argc, char **argv)
{
        int i;

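        /* 0x700 is the program check interrupt vector and 0x600 the alignment
         * interrupt vector (fixed offsets defined by the architecture).
         */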
        handle_exception(0x700, program_check_handler, (void *)&is_invalid);
        handle_exception(0x600, alignment_handler, (void *)&alignment);

        for (i = 1; i < argc; i++) {
                if (strcmp(argv[i], "-v") == 0) {
                        verbose = 1;
                }
        }

        report_prefix_push("emulator");

        test_64bit();
        test_illegal();
        test_lswx();
        test_lswi();

        report_prefix_pop();

        return report_summary();
}