xref: /qemu/target/alpha/helper.c (revision 342e313d6c1a8e6da758bd642777b85af1a0fc37)
1 /*
2  *  Alpha emulation cpu helpers for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "cpu.h"
23 #include "exec/cputlb.h"
24 #include "exec/page-protection.h"
25 #include "fpu/softfloat-types.h"
26 #include "exec/helper-proto.h"
27 #include "qemu/qemu-print.h"
28 #include "system/memory.h"
29 
30 
/*
 * Move the bit selected by mask SRC in X to the bit position of mask DST.
 * SRC and DST are expected to be single-bit masks (the larger must be a
 * multiple of the smaller so the shift can be done by division or
 * multiplication); both should be compile-time constants.
 * All parameters are fully parenthesized so expression arguments
 * (e.g. `a | b`) expand correctly.
 */
#define CONVERT_BIT(X, SRC, DST)                        \
    ((SRC) > (DST)                                      \
     ? (X) / ((SRC) / (DST)) & (DST)                    \
     : ((X) & (SRC)) * ((DST) / (SRC)))
33 
34 uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
35 {
36     return (uint64_t)env->fpcr << 32;
37 }
38 
/*
 * Store VAL into the FPCR (architectural bits in the high 32 bits of VAL)
 * and recompute the cached state derived from it: fpcr_exc_enable,
 * fpcr_dyn_round, flush_inputs_to_zero and fpcr_flush_to_zero.
 */
void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    /* Map the FPCR dynamic rounding-mode field to softfloat modes.  */
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user.  */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    /* Collect the per-exception disable bits at the status-bit positions. */
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    /* An exception is enabled iff its disable bit was clear.  */
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

    /* Flush results to zero when underflow is disabled and UNDZ is set.  */
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}
89 
/* TCG helper: read the FPCR.  */
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}
94 
/* TCG helper: write the FPCR and update derived state.  */
void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
99 
100 static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
101 {
102 #ifndef CONFIG_USER_ONLY
103     if (env->flags & ENV_FLAG_PAL_MODE) {
104         if (reg >= 8 && reg <= 14) {
105             return &env->shadow[reg - 8];
106         } else if (reg == 25) {
107             return &env->shadow[7];
108         }
109     }
110 #endif
111     return &env->ir[reg];
112 }
113 
/* Read general register REG, honoring PALmode shadow remapping.  */
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}
118 
/* Write VAL to general register REG, honoring PALmode shadow remapping.  */
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}
123 
124 #if defined(CONFIG_USER_ONLY)
125 void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
126                               MMUAccessType access_type,
127                               bool maperr, uintptr_t retaddr)
128 {
129     CPUAlphaState *env = cpu_env(cs);
130     target_ulong mmcsr, cause;
131 
132     /* Assuming !maperr, infer the missing protection. */
133     switch (access_type) {
134     case MMU_DATA_LOAD:
135         mmcsr = MM_K_FOR;
136         cause = 0;
137         break;
138     case MMU_DATA_STORE:
139         mmcsr = MM_K_FOW;
140         cause = 1;
141         break;
142     case MMU_INST_FETCH:
143         mmcsr = MM_K_FOE;
144         cause = -1;
145         break;
146     default:
147         g_assert_not_reached();
148     }
149     if (maperr) {
150         if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
151             /* Userspace address, therefore page not mapped. */
152             mmcsr = MM_K_TNV;
153         } else {
154             /* Kernel or invalid address. */
155             mmcsr = MM_K_ACV;
156         }
157     }
158 
159     /* Record the arguments that PALcode would give to the kernel. */
160     env->trap_arg0 = address;
161     env->trap_arg1 = mmcsr;
162     env->trap_arg2 = cause;
163 }
164 #else
165 /* Returns the OSF/1 entMM failure indication, or -1 on success.  */
166 static int get_physical_address(CPUAlphaState *env, target_ulong addr,
167                                 int prot_need, int mmu_idx,
168                                 target_ulong *pphys, int *pprot)
169 {
170     CPUState *cs = env_cpu(env);
171     target_long saddr = addr;
172     target_ulong phys = 0;
173     target_ulong L1pte, L2pte, L3pte;
174     target_ulong pt, index;
175     int prot = 0;
176     int ret = MM_K_ACV;
177 
178     /* Handle physical accesses.  */
179     if (mmu_idx == MMU_PHYS_IDX) {
180         phys = addr;
181         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
182         ret = -1;
183         goto exit;
184     }
185 
186     /* Ensure that the virtual address is properly sign-extended from
187        the last implemented virtual address bit.  */
188     if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
189         goto exit;
190     }
191 
192     /* Translate the superpage.  */
193     /* ??? When we do more than emulate Unix PALcode, we'll need to
194        determine which KSEG is actually active.  */
195     if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
196         /* User-space cannot access KSEG addresses.  */
197         if (mmu_idx != MMU_KERNEL_IDX) {
198             goto exit;
199         }
200 
201         /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
202            We would not do this if the 48-bit KSEG is enabled.  */
203         phys = saddr & ((1ull << 40) - 1);
204         phys |= (saddr & (1ull << 40)) << 3;
205 
206         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
207         ret = -1;
208         goto exit;
209     }
210 
211     /* Interpret the page table exactly like PALcode does.  */
212 
213     pt = env->ptbr;
214 
215     /* TODO: rather than using ldq_phys() to read the page table we should
216      * use address_space_ldq() so that we can handle the case when
217      * the page table read gives a bus fault, rather than ignoring it.
218      * For the existing code the zero data that ldq_phys will return for
219      * an access to invalid memory will result in our treating the page
220      * table as invalid, which may even be the right behaviour.
221      */
222 
223     /* L1 page table read.  */
224     index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
225     L1pte = ldq_phys(cs->as, pt + index*8);
226 
227     if (unlikely((L1pte & PTE_VALID) == 0)) {
228         ret = MM_K_TNV;
229         goto exit;
230     }
231     if (unlikely((L1pte & PTE_KRE) == 0)) {
232         goto exit;
233     }
234     pt = L1pte >> 32 << TARGET_PAGE_BITS;
235 
236     /* L2 page table read.  */
237     index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
238     L2pte = ldq_phys(cs->as, pt + index*8);
239 
240     if (unlikely((L2pte & PTE_VALID) == 0)) {
241         ret = MM_K_TNV;
242         goto exit;
243     }
244     if (unlikely((L2pte & PTE_KRE) == 0)) {
245         goto exit;
246     }
247     pt = L2pte >> 32 << TARGET_PAGE_BITS;
248 
249     /* L3 page table read.  */
250     index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
251     L3pte = ldq_phys(cs->as, pt + index*8);
252 
253     phys = L3pte >> 32 << TARGET_PAGE_BITS;
254     if (unlikely((L3pte & PTE_VALID) == 0)) {
255         ret = MM_K_TNV;
256         goto exit;
257     }
258 
259 #if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
260 # error page bits out of date
261 #endif
262 
263     /* Check access violations.  */
264     if (L3pte & (PTE_KRE << mmu_idx)) {
265         prot |= PAGE_READ | PAGE_EXEC;
266     }
267     if (L3pte & (PTE_KWE << mmu_idx)) {
268         prot |= PAGE_WRITE;
269     }
270     if (unlikely((prot & prot_need) == 0 && prot_need)) {
271         goto exit;
272     }
273 
274     /* Check fault-on-operation violations.  */
275     prot &= ~(L3pte >> 1);
276     ret = -1;
277     if (unlikely((prot & prot_need) == 0)) {
278         ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
279                prot_need & PAGE_WRITE ? MM_K_FOW :
280                prot_need & PAGE_READ ? MM_K_FOR : -1);
281     }
282 
283  exit:
284     *pphys = phys;
285     *pprot = prot;
286     return ret;
287 }
288 
289 hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
290 {
291     target_ulong phys;
292     int prot, fail;
293 
294     fail = get_physical_address(cpu_env(cs), addr, 0, 0, &phys, &prot);
295     return (fail >= 0 ? -1 : phys);
296 }
297 
298 bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
299                         MMUAccessType access_type, int mmu_idx,
300                         bool probe, uintptr_t retaddr)
301 {
302     CPUAlphaState *env = cpu_env(cs);
303     target_ulong phys;
304     int prot, fail;
305 
306     fail = get_physical_address(env, addr, 1 << access_type,
307                                 mmu_idx, &phys, &prot);
308     if (unlikely(fail >= 0)) {
309         if (probe) {
310             return false;
311         }
312         cs->exception_index = EXCP_MMFAULT;
313         env->trap_arg0 = addr;
314         env->trap_arg1 = fail;
315         env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
316                           access_type == MMU_DATA_STORE ? 1ull :
317                           /* access_type == MMU_INST_FETCH */ -1ull);
318         cpu_loop_exit_restore(cs, retaddr);
319     }
320 
321     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
322                  prot, mmu_idx, TARGET_PAGE_SIZE);
323     return true;
324 }
325 
/*
 * Deliver the exception or interrupt in CS->exception_index: record
 * the interrupted PC (with its PALmode bit) in EXC_ADDR and redirect
 * execution to the matching PALcode entry point, entering PALmode.
 */
void alpha_cpu_do_interrupt(CPUState *cs)
{
    CPUAlphaState *env = cpu_env(cs);
    int i = cs->exception_index;

    /* Optional tracing of each interrupt delivery under -d int.  */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

    /* Map the exception to its PALcode vector offset.  */
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
}
434 
/*
 * Check INTERRUPT_REQUEST against the processor status interrupt
 * priority level and deliver the highest-priority unmasked interrupt,
 * if any.  Returns true when an interrupt was taken.
 */
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAlphaState *env = cpu_env(cs);
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    /* Deliver the selected interrupt, if any, via the common path.  */
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
477 
478 #endif /* !CONFIG_USER_ONLY */
479 
480 void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
481 {
482     static const char linux_reg_names[31][4] = {
483         "v0",  "t0",  "t1", "t2",  "t3", "t4", "t5", "t6",
484         "t7",  "s0",  "s1", "s2",  "s3", "s4", "s5", "fp",
485         "a0",  "a1",  "a2", "a3",  "a4", "a5", "t8", "t9",
486         "t10", "t11", "ra", "t12", "at", "gp", "sp"
487     };
488     CPUAlphaState *env = cpu_env(cs);
489     int i;
490 
491     qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
492                  env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
493     for (i = 0; i < 31; i++) {
494         qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
495                      linux_reg_names[i], cpu_alpha_load_gr(env, i),
496                      (i % 3) == 2 ? '\n' : ' ');
497     }
498 
499     qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
500                  env->lock_addr, env->lock_value);
501 
502     if (flags & CPU_DUMP_FPU) {
503         for (i = 0; i < 31; i++) {
504             qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
505                          (i % 3) == 2 ? '\n' : ' ');
506         }
507         qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
508     }
509     qemu_fprintf(f, "\n");
510 }
511 
512 /* This should only be called from translate, via gen_excp.
513    We expect that ENV->PC has already been updated.  */
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
/* Raise exception EXCP with auxiliary code ERROR; does not return.  */
G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}
522 
523 /* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
/*
 * Raise exception EXCP with auxiliary code ERROR.  When RETADDR is
 * non-zero, restore guest state from that host return address first.
 * Does not return.
 */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
538 
/*
 * Raise an arithmetic trap: EXC and MASK are passed to the kernel via
 * trap_arg0/trap_arg1.  Does not return.
 */
G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}
546