/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "fpu/softfloat-types.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
#include "system/memory.h"

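/*
 * Move the single bit selected by the mask SRC to the position of the
 * single-bit mask DST, e.g. CONVERT_BIT(x, 0x100, 0x4) tests bit 8 of
 * x and returns 0x4 or 0.  Division and multiplication by the ratio of
 * the two power-of-two masks stand in for variable shifts, so the
 * macro works whether SRC is above or below DST.
 */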
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
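    /* Map the FPCR dynamic rounding-mode field onto the softfloat
       rounding-mode constants. */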
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user. */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which alpha_ieee_swcr_to_fpcr maps to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

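    /*
     * The FPCR holds trap *disable* bits (INED, UNFD, ...), whereas we
     * keep an *enable* mask; gather each disable bit at the position of
     * its status bit, then invert the lot below.
     */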
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

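    /* Underflow results are flushed to zero when both underflow traps
       are disabled (UNFD) and underflow-to-zero (UNDZ) is set; for
       linux-user, SWCR_MAP_UMZ forces the same behaviour. */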
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}

static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
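    /* In PALmode, r8-r14 and r25 are replaced by the shadow registers,
       giving PALcode scratch space while the OS-visible values in
       ir[] are preserved. */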
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

#if defined(CONFIG_USER_ONLY)
void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
                              MMUAccessType access_type,
                              bool maperr, uintptr_t retaddr)
{
    CPUAlphaState *env = cpu_env(cs);
    target_ulong mmcsr, cause;

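    /* The cause codes 0 (read), 1 (write) and -1 (execute) follow the
       convention the kernel's entMM handler expects in its third
       argument. */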
    /* Assuming !maperr, infer the missing protection. */
    switch (access_type) {
    case MMU_DATA_LOAD:
        mmcsr = MM_K_FOR;
        cause = 0;
        break;
    case MMU_DATA_STORE:
        mmcsr = MM_K_FOW;
        cause = 1;
        break;
    case MMU_INST_FETCH:
        mmcsr = MM_K_FOE;
        cause = -1;
        break;
    default:
        g_assert_not_reached();
    }
    if (maperr) {
        if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
            /* Userspace address, therefore page not mapped. */
            mmcsr = MM_K_TNV;
        } else {
            /* Kernel or invalid address. */
            mmcsr = MM_K_ACV;
        }
    }

    /* Record the arguments that PALcode would give to the kernel. */
    env->trap_arg0 = address;
    env->trap_arg1 = mmcsr;
    env->trap_arg2 = cause;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit. */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage. */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active. */
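    /* KSEG is the quadrant of the 43-bit space with VA<42:41> == 2,
       i.e. addresses from 0xfffffc0000000000 once sign-extended. */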
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses. */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled. */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does. */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */
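    /* With 8KB pages (TARGET_PAGE_BITS == 13), the three 10-bit table
       indices live at VA<42:33>, VA<32:23> and VA<22:13>, covering the
       full 43-bit virtual address space. */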

    /* L1 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index * 8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index * 8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read. */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index * 8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

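    /* PTE_KRE/PTE_KWE are the kernel enable bits; the user enables sit
       one bit higher, so shifting by mmu_idx (0 = kernel, 1 = user)
       selects the pair for the current mode. */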
    /* Check access violations. */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations. */
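    /* The FOR/FOW/FOE bits occupy PTE bits 1-3, exactly one position
       above PAGE_READ/WRITE/EXEC, so a single shift aligns them. */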
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(cpu_env(cs), addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUAlphaState *env = cpu_env(cs);
    target_ulong phys;
    int prot, fail;

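    /* MMU_DATA_LOAD, MMU_DATA_STORE and MMU_INST_FETCH are 0, 1 and 2,
       so 1 << access_type yields PAGE_READ, PAGE_WRITE or PAGE_EXEC
       (see the preprocessor assertion on the PAGE_* values above). */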
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

void alpha_cpu_do_interrupt(CPUState *cs)
{
    CPUAlphaState *env = cpu_env(cs);
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job. */
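        /* E.g. CALL_PAL 0x83 (the OSF/1 callsys entry) vectors to
           palbr + 0x2000 + 3 * 64. */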
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode. */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point. */
    env->pc = env->palbr + i;

    /* Switch to PALmode. */
    env->flags |= ENV_FLAG_PAL_MODE;
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAlphaState *env = cpu_env(cs);
    int idx = -1;

    /* We never take interrupts while in PALmode. */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL. */
    /* ??? This hard-codes the OSF/1 interrupt levels. */
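    /* A source is delivered only while the current IPL is below its
       level: device interrupts at 4, the clock at 5, interprocessor
       interrupts at 6 and machine checks at 7. */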
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    static const char linux_reg_names[31][4] = {
        "v0",  "t0",  "t1",  "t2",  "t3",  "t4",  "t5",  "t6",
        "t7",  "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "fp",
        "a0",  "a1",  "a2",  "a3",  "a4",  "a5",  "t8",  "t9",
        "t10", "t11", "ra",  "t12", "at",  "gp",  "sp"
    };
    CPUAlphaState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated. */
G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC. */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}