/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

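/*
 * Return the exception level an exception is delivered to by default:
 * the current EL, but never below EL1, and redirected to EL3 when
 * Secure EL1 does not exist because EL3 is AArch32.
 */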
int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

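/*
 * Neon table-lookup helper (VTBL/VTBX): 'desc' packs the table length
 * minus one in its low two bits and the first table D-register in the
 * remaining bits. Each byte of 'ireg' indexes into the table; indexes
 * beyond the table take the corresponding byte of 'def'.
 */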
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

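/*
 * 32-bit signed/unsigned division helpers. Division by zero yields 0,
 * or traps on M-profile cores when CCR.DIV_0_TRP is set; the signed
 * overflow case INT_MIN / -1 returns INT_MIN.
 */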
int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

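/*
 * Saturating arithmetic helpers. All of them set the sticky Q flag on
 * overflow; add_setq leaves the overflowed result unmodified, while the
 * *_saturate variants clamp it to the representable range.
 */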
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

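/* SETEND: toggle the CPSR.E (data endianness) bit. */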
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called for a BXJ executed in NS EL0 or EL1 on a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 * For a trap, *excp is updated with the EXCP_* trap type to use.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe, uint32_t *excp)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    *excp = EXCP_UDEF;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (!(arm_sctlr(env, cur_el) & mask)) {
            return exception_target_el(env);
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (arm_feature(env, ARM_FEATURE_V8) && !arm_is_el3_or_mon(env)) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            if (!arm_el_is_aa64(env, 3)) {
                *excp = EXCP_MON_TRAP;
            }
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    uint32_t excp;
    int target_el = check_wfx_trap(env, false, &excp);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, excp, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t excp;
    int target_el = check_wfx_trap(env, false, &excp);
    /* The WFIT should time out when CNTVCT_EL0 >= the specified value. */
    uint64_t cntval = gt_get_countervalue(env);
    /*
     * We want the value that we would get if we read CNTVCT_EL0 from
     * the current exception level, so the direct_access offset, not
     * the indirect_access one. Compare the pseudocode LocalTimeoutEvent(),
     * which calls VirtualCounterTimer().
     */
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    uint64_t cntvct = cntval - offset;
    uint64_t nexttick;

    if (cpu_has_work(cs) || cntvct >= timeout) {
        /*
         * Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, excp, syn_wfx(1, 0xe, 0, false), target_el);
    }

    if (uadd64_overflow(timeout, offset, &nexttick)) {
        nexttick = UINT64_MAX;
    }
    if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
        /*
         * If the timeout is too long for the signed 64-bit range
         * of a QEMUTimer, let it expire early.
         */
        timer_mod_ns(cpu->wfxt_timer, INT64_MAX);
    } else {
        timer_mod(cpu->wfxt_timer, nexttick);
    }
    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

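/* Read the CPSR, with the execution-state bits (CPSR_EXEC) masked out. */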
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    bql_lock();
    arm_call_pre_el_change_hook(env_archcpu(env));
    bql_unlock();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    bql_lock();
    arm_call_el_change_hook(env_archcpu(env));
    bql_unlock();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /*
         * Handle Hyp target regs first because some are special cases
         * which don't want the usual "not accessible from tgtmode" check.
         */
        switch (regno) {
        case 16 ... 17: /* ELR_Hyp, SPSR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        case 13:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            g_assert_not_reached();
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
            /* Only happens for SPSR_Hyp access in Hyp mode */
            env->spsr = value;
        } else {
            env->banked_spsr[bank_number(tgtmode)] = value;
        }
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
            /* Only happens for SPSR_Hyp access in Hyp mode */
            return env->spsr;
        } else {
            return env->banked_spsr[bank_number(tgtmode)];
        }
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

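/*
 * Check an access to a coprocessor/system register from translated code.
 * Returns the register's ARMCPRegInfo on success; on failure it raises
 * the appropriate exception and does not return.
 */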
const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;
    uint32_t excp;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_UNDEFINED;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) < 2) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool nxs = FIELD_EX32(ri->fgt, FGT, NXS);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        if (nxs && (arm_hcrx_el2_eff(env) & HCRX_FGTNXS)) {
            /*
             * If HCRX_EL2.FGTnXS is 1 then the fine-grained trap for
             * TLBI maintenance insns does *not* apply to the nXS variant.
             */
            trapbit = 0;
        } else {
            trapbit = extract64(trapword, bitpos, 1);
        }
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    excp = EXCP_UDEF;
    switch (res) {
        /* CP_ACCESS_TRAP* traps are always direct to a specified EL */
    case CP_ACCESS_TRAP_EL3:
        /*
         * If EL3 is AArch32 then there's no syndrome register; the cases
         * where we would raise a SystemAccessTrap to AArch64 EL3 all become
         * raising a Monitor trap exception. (Because there's no visible
         * syndrome it doesn't matter what we pass to raise_exception().)
         */
        if (!arm_el_is_aa64(env, 3)) {
            excp = EXCP_MON_TRAP;
        }
        break;
    case CP_ACCESS_TRAP_EL2:
    case CP_ACCESS_TRAP_EL1:
        break;
    case CP_ACCESS_UNDEFINED:
        /* CP_ACCESS_UNDEFINED is never direct to a specified EL */
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 1:
        assert(arm_current_el(env) < 2);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, excp, syndrome, target_el);
}

const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}

/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el;

    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
        target_el = 2;
        break;
    case ARMMMUIdx_E30_0:
        target_el = 3;
        break;
    default:
        target_el = 1;
        break;
    }

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}

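/*
 * Read/write a system register through its ARMCPRegInfo accessor.
 * Registers marked ARM_CP_IO must be accessed with the BQL held, so
 * take and drop it around the accessor for those.
 */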
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        ri->writefn(env, ri, value);
        bql_unlock();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        res = ri->readfn(env, ri);
        bql_unlock();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        ri->writefn(env, ri, value);
        bql_unlock();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        res = ri->readfn(env, ri);
        bql_unlock();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef or trap[1]    Undef insn
     *
     * [1] In this case:
     *  - if HCR_EL2.NV == 1 we must trap to EL2
     *  - if HCR_EL2.NV == 0 then newer architecture revisions permit
     *    AArch64 (but not AArch32) to trap to EL2 as an IMPDEF choice
     *  - otherwise we must UNDEF
     * We take the IMPDEF choice to always UNDEF if HCR_EL2.NV == 0.
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        !(arm_hcr_el2_eff(env) & HCR_NV) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * If we have no EL3 then traditionally SMC always UNDEFs and can't be
         * trapped to EL2. For nested virtualization, SMC can be trapped to
         * the outer hypervisor. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

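/*
 * These take the shift amount from the low byte of the second operand,
 * as the flag-setting shift-by-register forms do, and update CF with
 * the last bit shifted out.
 */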
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

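/*
 * Probe 'size' bytes at 'ptr' for the given access type and mmu_idx,
 * splitting the probe in two when the range crosses a page boundary so
 * that faults on either page are taken before any data is accessed.
 */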
void HELPER(probe_access)(CPUARMState *env, vaddr ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_SIZE);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff,
     * and will return HCR_EL2.VSE == 0, so nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}
1283