xref: /qemu/hw/intc/arm_gicv3_cpuif.c (revision b103cc6e74ac92f070a0e004bd84334e845c20b5)
1 /*
2  * ARM Generic Interrupt Controller v3 (emulation)
3  *
4  * Copyright (c) 2016 Linaro Limited
5  * Written by Peter Maydell
6  *
7  * This code is licensed under the GPL, version 2 or (at your option)
8  * any later version.
9  */
10 
11 /* This file contains the code for the system register interface
12  * portions of the GICv3.
13  */
14 
15 #include "qemu/osdep.h"
16 #include "qemu/bitops.h"
17 #include "qemu/log.h"
18 #include "qemu/main-loop.h"
19 #include "trace.h"
20 #include "gicv3_internal.h"
21 #include "hw/irq.h"
22 #include "cpu.h"
23 #include "target/arm/cpregs.h"
24 #include "target/arm/cpu-features.h"
25 #include "target/arm/internals.h"
26 #include "system/tcg.h"
27 #include "system/qtest.h"
28 
29 /*
30  * Special case return value from hppvi_index(); must be larger than
31  * the architecturally maximum possible list register index (which is 15)
32  */
33 #define HPPVI_INDEX_VLPI 16
34 
35 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
36 {
37     return env->gicv3state;
38 }
39 
40 static bool gicv3_use_ns_bank(CPUARMState *env)
41 {
42     /* Return true if we should use the NonSecure bank for a banked GIC
43      * CPU interface register. Note that this differs from the
44      * access_secure_reg() function because GICv3 banked registers are
45      * banked even for AArch64, unlike the other CPU system registers.
46      */
47     return !arm_is_secure_below_el3(env);
48 }
49 
50 /* The minimum BPR for the virtual interface is a configurable property */
51 static inline int icv_min_vbpr(GICv3CPUState *cs)
52 {
53     return 7 - cs->vprebits;
54 }
55 
56 static inline int ich_num_aprs(GICv3CPUState *cs)
57 {
58     /* Return the number of virtual APR registers (1, 2, or 4) */
59     int aprmax = 1 << (cs->vprebits - 5);
60     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
61     return aprmax;
62 }
63 
64 /* Simple accessor functions for LR fields */
65 static uint32_t ich_lr_vintid(uint64_t lr)
66 {
67     return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
68 }
69 
70 static uint32_t ich_lr_pintid(uint64_t lr)
71 {
72     return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
73 }
74 
75 static uint32_t ich_lr_prio(uint64_t lr)
76 {
77     return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
78 }
79 
80 static int ich_lr_state(uint64_t lr)
81 {
82     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
83 }
84 
85 static bool icv_access(CPUARMState *env, int hcr_flags)
86 {
87     /* Return true if this ICC_ register access should really be
88      * directed to an ICV_ access. hcr_flags is a mask of
89      * HCR_EL2 bits to check: we treat this as an ICV_ access
90      * if we are in NS EL1 and at least one of the specified
91      * HCR_EL2 bits is set.
92      *
93      * ICV registers fall into four categories:
94      *  * access if NS EL1 and HCR_EL2.FMO == 1:
95      *    all ICV regs with '0' in their name
96      *  * access if NS EL1 and HCR_EL2.IMO == 1:
97      *    all ICV regs with '1' in their name
98      *  * access if NS EL1 and either IMO or FMO == 1:
99      *    CTLR, DIR, PMR, RPR
100      */
101     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
102     bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
103 
104     return flagmatch && arm_current_el(env) == 1
105         && !arm_is_secure_below_el3(env);
106 }
107 
108 static int read_vbpr(GICv3CPUState *cs, int grp)
109 {
110     /* Read VBPR value out of the VMCR field (caller must handle
111      * VCBPR effects if required)
112      */
113     if (grp == GICV3_G0) {
114         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
115                      ICH_VMCR_EL2_VBPR0_LENGTH);
116     } else {
117         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
118                          ICH_VMCR_EL2_VBPR1_LENGTH);
119     }
120 }
121 
122 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
123 {
124     /* Write new VBPR1 value, handling the "writing a value less than
125      * the minimum sets it to the minimum" semantics.
126      */
127     int min = icv_min_vbpr(cs);
128 
129     if (grp != GICV3_G0) {
130         min++;
131     }
132 
133     value = MAX(value, min);
134 
135     if (grp == GICV3_G0) {
136         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
137                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
138     } else {
139         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
140                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
141     }
142 }
143 
144 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
145 {
146     /* Return a mask word which clears the unimplemented priority bits
147      * from a priority value for a virtual interrupt. (Not to be confused
148      * with the group priority, whose mask depends on the value of VBPR
149      * for the interrupt group.)
150      */
151     return (~0U << (8 - cs->vpribits)) & 0xff;
152 }
153 
static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the ICH Active Priority Registers.
     * Returns 0xff (the idle priority) when nothing is active.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    /* An active virtual NMI has the highest priority (0x0); its active
     * flag lives only in AP1R0, so check it before scanning the APRs.
     */
    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
        return 0x0;
    }

    for (i = 0; i < aprmax; i++) {
        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
            cs->ich_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Lowest set bit = highest-priority active group; scale the bit
         * index back up into an 8-bit priority value using the number of
         * implemented preemption bits.
         */
        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
178 
179 static int hppvi_index(GICv3CPUState *cs)
180 {
181     /*
182      * Return the list register index of the highest priority pending
183      * virtual interrupt, as per the HighestPriorityVirtualInterrupt
184      * pseudocode. If no pending virtual interrupts, return -1.
185      * If the highest priority pending virtual interrupt is a vLPI,
186      * return HPPVI_INDEX_VLPI.
187      * (The pseudocode handles checking whether the vLPI is higher
188      * priority than the highest priority list register at every
189      * callsite of HighestPriorityVirtualInterrupt; we check it here.)
190      */
191     ARMCPU *cpu = ARM_CPU(cs->cpu);
192     CPUARMState *env = &cpu->env;
193     int idx = -1;
194     int i;
195     /* Note that a list register entry with a priority of 0xff will
196      * never be reported by this function; this is the architecturally
197      * correct behaviour.
198      */
199     int prio = 0xff;
200     bool nmi = false;
201 
202     if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
203         /* Both groups disabled, definitely nothing to do */
204         return idx;
205     }
206 
207     for (i = 0; i < cs->num_list_regs; i++) {
208         uint64_t lr = cs->ich_lr_el2[i];
209         bool thisnmi;
210         int thisprio;
211 
212         if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
213             /* Not Pending */
214             continue;
215         }
216 
217         /* Ignore interrupts if relevant group enable not set */
218         if (lr & ICH_LR_EL2_GROUP) {
219             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
220                 continue;
221             }
222         } else {
223             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
224                 continue;
225             }
226         }
227 
228         thisnmi = lr & ICH_LR_EL2_NMI;
229         thisprio = ich_lr_prio(lr);
230 
231         if ((thisprio < prio) || ((thisprio == prio) && (thisnmi & (!nmi)))) {
232             prio = thisprio;
233             nmi = thisnmi;
234             idx = i;
235         }
236     }
237 
238     /*
239      * "no pending vLPI" is indicated with prio = 0xff, which always
240      * fails the priority check here. vLPIs are only considered
241      * when we are in Non-Secure state.
242      */
243     if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
244         if (cs->hppvlpi.grp == GICV3_G0) {
245             if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
246                 return HPPVI_INDEX_VLPI;
247             }
248         } else {
249             if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
250                 return HPPVI_INDEX_VLPI;
251             }
252         }
253     }
254 
255     return idx;
256 }
257 
static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for a virtual interrupt in the specified group.
     * This depends on the VBPR value.
     * If using VBPR0 then:
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * If using VBPR1 then:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICH_VMCR_EL2.VCBPR settings.
     *
     * This corresponds to the VGroupBits() pseudocode.
     */
    int bpr;

    /* With VCBPR set, Group 1 uses the Group 0 BPR ("common BPR") */
    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        group = GICV3_G0;
    }

    bpr = read_vbpr(cs, group);
    if (group == GICV3_G1NS) {
        /* BPR1's effective value is one less than its encoding */
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
292 
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    bool is_nmi;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    is_nmi = lr & ICH_LR_EL2_NMI;
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    /* NMIs are not subject to the virtual priority mask */
    if (!is_nmi && prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    /* At equal group priority, a pending NMI may still preempt provided
     * no NMI is already active (AP1R0's NMI bit is clear).
     */
    if ((prio & mask) == (rprio & mask) && is_nmi &&
        !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
        return true;
    }

    return false;
}
348 
static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
{
    /*
     * Return true if we can signal the highest priority pending vLPI.
     * We can assume we're Non-secure because hppvi_index() already
     * tested for that.
     */
    uint32_t mask, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (cs->hppvlpi.prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);

    /*
     * We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
389 
static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
     */
    uint32_t value = 0;        /* accumulated ICH_EISR_EL2 bits */
    int validcount = 0;        /* count of LRs with a non-invalid state */
    bool seenpending = false;  /* any LR currently in Pending state? */
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        /* EISR bit i: state invalid (0), not hardware, EOI flag set */
        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
            == ICH_LR_EL2_EOI) {
            value |= (1 << i);
        }
        if ((lr & ICH_LR_EL2_STATE_MASK)) {
            validcount++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            seenpending = true;
        }
    }

    if (misr) {
        /* U: underflow — fewer than two valid LRs and UIE enabled */
        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
            *misr |= ICH_MISR_EL2_U;
        }
        /* NP: no LR is in Pending state and NPIE is enabled */
        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
            *misr |= ICH_MISR_EL2_NP;
        }
        /* EOI: at least one EISR bit is set */
        if (value) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }
    return value;
}
433 
434 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
435 {
436     /* Return a set of bits indicating the maintenance interrupt status
437      * (as seen in the ICH_MISR_EL2 register).
438      */
439     uint32_t value = 0;
440 
441     /* Scan list registers and fill in the U, NP and EOI bits */
442     eoi_maintenance_interrupt_state(cs, &value);
443 
444     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
445         (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
446         value |= ICH_MISR_EL2_LRENP;
447     }
448 
449     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
450         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
451         value |= ICH_MISR_EL2_VGRP0E;
452     }
453 
454     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
455         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
456         value |= ICH_MISR_EL2_VGRP0D;
457     }
458     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
459         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
460         value |= ICH_MISR_EL2_VGRP1E;
461     }
462 
463     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
464         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
465         value |= ICH_MISR_EL2_VGRP1D;
466     }
467 
468     return value;
469 }
470 
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts.
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        /* Highest priority pending is a vLPI: G0 -> vFIQ, G1 -> vIRQ */
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        /* Highest priority pending is in list register idx */
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /*
             * Virtual interrupts are simple: G0 are always FIQ, and G1 are
             * IRQ or NMI which depends on the ICH_LR<n>_EL2.NMI to have
             * non-maskable property.
             */
            if (lr & ICH_LR_EL2_GROUP) {
                if (lr & ICH_LR_EL2_NMI) {
                    nmilevel = 1;
                } else {
                    irqlevel = 1;
                }
            } else {
                fiqlevel = 1;
            }
        }
    }

    /* Drive all three output lines; at most one of them is 1 here */
    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
    qemu_set_irq(cs->parent_vnmi, nmilevel);
}
525 
static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel = 0;

    /* First update the vIRQ/vFIQ/vNMI lines */
    gicv3_cpuif_virt_irq_fiq_update(cs);

    /* Maintenance interrupt fires only while the virtual interface is
     * enabled and at least one MISR condition is active.
     */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0) {
        maintlevel = 1;
    }

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}
559 
560 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
561 {
562     GICv3CPUState *cs = icc_cs_from_env(env);
563     int regno = ri->opc2 & 3;
564     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
565     uint64_t value = cs->ich_apr[grp][regno];
566 
567     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
568     return value;
569 }
570 
571 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
572                          uint64_t value)
573 {
574     GICv3CPUState *cs = icc_cs_from_env(env);
575     int regno = ri->opc2 & 3;
576     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
577 
578     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
579 
580     if (cs->nmi_support) {
581         cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
582     } else {
583         cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
584     }
585 
586     gicv3_cpuif_virt_irq_fiq_update(cs);
587     return;
588 }
589 
590 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
591 {
592     GICv3CPUState *cs = icc_cs_from_env(env);
593     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
594     uint64_t bpr;
595     bool satinc = false;
596 
597     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
598         /* reads return bpr0 + 1 saturated to 7, writes ignored */
599         grp = GICV3_G0;
600         satinc = true;
601     }
602 
603     bpr = read_vbpr(cs, grp);
604 
605     if (satinc) {
606         bpr++;
607         bpr = MIN(bpr, 7);
608     }
609 
610     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
611 
612     return bpr;
613 }
614 
615 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
616                           uint64_t value)
617 {
618     GICv3CPUState *cs = icc_cs_from_env(env);
619     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
620 
621     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
622                               gicv3_redist_affid(cs), value);
623 
624     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
625         /* reads return bpr0 + 1 saturated to 7, writes ignored */
626         return;
627     }
628 
629     write_vbpr(cs, grp, value);
630 
631     gicv3_cpuif_virt_irq_fiq_update(cs);
632 }
633 
634 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
635 {
636     GICv3CPUState *cs = icc_cs_from_env(env);
637     uint64_t value;
638 
639     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
640                       ICH_VMCR_EL2_VPMR_LENGTH);
641 
642     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
643     return value;
644 }
645 
646 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
647                           uint64_t value)
648 {
649     GICv3CPUState *cs = icc_cs_from_env(env);
650 
651     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
652 
653     value &= icv_fullprio_mask(cs);
654 
655     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
656                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
657 
658     gicv3_cpuif_virt_irq_fiq_update(cs);
659 }
660 
661 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
662 {
663     GICv3CPUState *cs = icc_cs_from_env(env);
664     int enbit;
665     uint64_t value;
666 
667     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
668     value = extract64(cs->ich_vmcr_el2, enbit, 1);
669 
670     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
671                                 gicv3_redist_affid(cs), value);
672     return value;
673 }
674 
675 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
676                              uint64_t value)
677 {
678     GICv3CPUState *cs = icc_cs_from_env(env);
679     int enbit;
680 
681     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
682                                  gicv3_redist_affid(cs), value);
683 
684     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
685 
686     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
687     gicv3_cpuif_virt_update(cs);
688 }
689 
690 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
691 {
692     GICv3CPUState *cs = icc_cs_from_env(env);
693     uint64_t value;
694 
695     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
696      * should match the ones reported in ich_vtr_read().
697      */
698     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
699         ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
700 
701     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
702         value |= ICC_CTLR_EL1_EOIMODE;
703     }
704 
705     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
706         value |= ICC_CTLR_EL1_CBPR;
707     }
708 
709     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
710     return value;
711 }
712 
713 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
714                                uint64_t value)
715 {
716     GICv3CPUState *cs = icc_cs_from_env(env);
717 
718     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
719 
720     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
721                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
722     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
723                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
724 
725     gicv3_cpuif_virt_irq_fiq_update(cs);
726 }
727 
728 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
729 {
730     GICv3CPUState *cs = icc_cs_from_env(env);
731     uint64_t prio = ich_highest_active_virt_prio(cs);
732 
733     if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
734         prio |= ICV_RPR_EL1_NMI;
735     }
736 
737     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
738     return prio;
739 }
740 
static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_HPPIR0 (crm == 8) or ICV_HPPIR1: the INTID of the highest
     * priority pending virtual interrupt for the requested group, or
     * INTID_SPURIOUS if there is none (or it belongs to the other group).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t value = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        /* Highest priority pending is a vLPI */
        if (cs->hppvlpi.grp == grp) {
            value = cs->hppvlpi.irq;
        }
    } else if (idx >= 0) {
        /* Highest priority pending is in list register idx */
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (grp == thisgrp) {
            value = ich_lr_vintid(lr);
        }
    }

    trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);
    return value;
}
765 
static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /* Activate the interrupt in the specified list register
     * by moving it from Pending to Active state, and update the
     * Active Priority Registers.
     */
    uint32_t mask = icv_gprio_mask(cs, grp);
    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    bool nmi = cs->ich_lr_el2[idx] & ICH_LR_EL2_NMI;
    /* Map the group priority onto a bit position in the APR array */
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;

    if (nmi) {
        /* An active NMI is recorded in the dedicated AP1R NMI flag,
         * not as an ordinary priority bit.
         */
        cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
    } else {
        cs->ich_apr[grp][regno] |= (1U << regbit);
    }
}
788 
static void icv_activate_vlpi(GICv3CPUState *cs)
{
    /* Activate the highest priority pending vLPI: record its group
     * priority in the Active Priority Registers and tell the
     * redistributor to clear its pending state.
     */
    uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    int prio = cs->hppvlpi.prio & mask;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_apr[cs->hppvlpi.grp][regno] |= (1U << regbit);
    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}
800 
static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_IAR0 (crm == 8) or ICV_IAR1: acknowledge the highest
     * priority pending virtual interrupt for the requested group,
     * activating it and returning its INTID (or INTID_SPURIOUS /
     * INTID_NMI as appropriate).
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;
    int el = arm_current_el(env);

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        /* NMI handling applies only when SCTLR_ELx.NMI is enabled */
        bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (!nmi) {
                    icv_activate_irq(cs, idx, grp);
                } else {
                    /* NMIs must be acknowledged via ICV_NMIAR1 instead */
                    intid = INTID_NMI;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    /* Acknowledging may change vIRQ/vFIQ and maintenance state */
    gicv3_cpuif_virt_update(cs);

    return intid;
}
844 
static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_NMIAR1: acknowledge the highest priority pending virtual
     * interrupt only if it is a Group 1 NMI; otherwise return
     * INTID_SPURIOUS. vLPIs are never NMIs, so HPPVI_INDEX_VLPI is
     * excluded here.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                if (lr & ICH_LR_EL2_NMI) {
                    icv_activate_irq(cs, idx, GICV3_G1NS);
                } else {
                    /* Not an NMI: this register acknowledges nothing */
                    intid = INTID_SPURIOUS;
                }
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /*
                 * We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);

    /* Acknowledging may change vIRQ/vFIQ and maintenance state */
    gicv3_cpuif_virt_update(cs);

    return intid;
}
880 
881 static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
882 {
883     /*
884      * Return a mask word which clears the unimplemented priority bits
885      * from a priority value for a physical interrupt. (Not to be confused
886      * with the group priority, whose mask depends on the value of BPR
887      * for the interrupt group.)
888      */
889     return (~0U << (8 - cs->pribits)) & 0xff;
890 }
891 
892 static inline int icc_min_bpr(GICv3CPUState *cs)
893 {
894     /* The minimum BPR for the physical interface. */
895     return 7 - cs->prebits;
896 }
897 
898 static inline int icc_min_bpr_ns(GICv3CPUState *cs)
899 {
900     return icc_min_bpr(cs) + 1;
901 }
902 
903 static inline int icc_num_aprs(GICv3CPUState *cs)
904 {
905     /* Return the number of APR registers (1, 2, or 4) */
906     int aprmax = 1 << MAX(cs->prebits - 5, 0);
907     assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
908     return aprmax;
909 }
910 
static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     * Returns 0xff (the idle priority) when nothing is active.
     */
    int i;

    if (cs->nmi_support) {
        /*
         * If an NMI is active this takes precedence over anything else
         * for priority purposes; the NMI bit is only in the AP1R0 bit.
         * We return here the effective priority of the NMI, which is
         * either 0x0 or 0x80. Callers will need to check NMI again for
         * purposes of either setting the RPR register bits or for
         * prioritization of NMI vs non-NMI.
         */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return 0;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            /* With GICD_CTLR.DS there is no NS view shift to apply */
            return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
        }
    }

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        /* Lowest set bit = highest-priority active group; scale the bit
         * index back up into an 8-bit priority value.
         */
        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
947 
948 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
949 {
950     /* Return a mask word which clears the subpriority bits from
951      * a priority value for an interrupt in the specified group.
952      * This depends on the BPR value. For CBPR0 (S or NS):
953      *  a BPR of 0 means the group priority bits are [7:1];
954      *  a BPR of 1 means they are [7:2], and so on down to
955      *  a BPR of 7 meaning no group priority bits at all.
956      * For CBPR1 NS:
957      *  a BPR of 0 is impossible (the minimum value is 1)
958      *  a BPR of 1 means the group priority bits are [7:1];
959      *  a BPR of 2 means they are [7:2], and so on down to
960      *  a BPR of 7 meaning the group priority is [7].
961      *
962      * Which BPR to use depends on the group of the interrupt and
963      * the current ICC_CTLR.CBPR settings.
964      *
965      * This corresponds to the GroupBits() pseudocode.
966      */
967     int bpr;
968 
969     if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
970         (group == GICV3_G1NS &&
971          cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
972         group = GICV3_G0;
973     }
974 
975     bpr = cs->icc_bpr[group] & 7;
976 
977     if (group == GICV3_G1NS) {
978         assert(bpr > 0);
979         bpr--;
980     }
981 
982     return ~0U << (bpr + 1);
983 }
984 
985 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
986 {
987     /* Return true if there is no pending interrupt, or the
988      * highest priority pending interrupt is in a group which has been
989      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
990      */
991     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
992 }
993 
static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    if (icc_no_enabled_hppi(cs)) {
        /* Nothing pending, or its group is disabled at the CPU interface */
        return false;
    }

    if (cs->hppi.nmi) {
        /*
         * A pending NMI is not subject to the usual PMR comparison,
         * but on a Security-enabled GIC a NS Group 1 NMI is still
         * blocked when PMR is in the Secure range (< 0x80), and for a
         * Secure access additionally when PMR is exactly 0x80.
         */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
            cs->hppi.grp == GICV3_G1NS) {
            if (cs->icc_pmr_el1 < 0x80) {
                return false;
            }
            if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
                return false;
            }
        }
    } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    /*
     * At equal group priority, a pending NMI may still preempt --
     * unless the running interrupt is itself an NMI (AP1R0 NMI bit set).
     */
    if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
        if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
            return true;
        }
    }

    return false;
}
1046 
void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    int nmilevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    /* Caller must hold the BQL: we touch CPU irq lines */
    g_assert(bql_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            /* Group 0 is always FIQ */
            isfiq = true;
            break;
        case GICV3_G1:
            /* Secure Group 1: FIQ unless taken in Secure state below EL3 */
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            /* NS Group 1: FIQ only when the CPU is currently Secure */
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (isfiq) {
            fiqlevel = 1;
        } else if (cs->hppi.nmi) {
            /* NMIs that would be IRQ are delivered on the dedicated NMI line */
            nmilevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
    qemu_set_irq(cs->parent_nmi, nmilevel);
}
1104 
static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_PMR_EL1 (interrupt priority mask register) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is virtualized: use the virtual PMR instead */
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            /* Shift the NS half of the range up to span the full range */
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}
1131 
static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_PMR_EL1 (interrupt priority mask register) */
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is virtualized: forward to the virtual PMR */
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        /* Map the NS-view value back into the NS half of the range */
        value = (value >> 1) | 0x80;
    }
    /* Discard unimplemented priority bits */
    value &= icc_fullprio_mask(cs);
    cs->icc_pmr_el1 = value;
    /* A PMR change may mask or unmask the pending interrupt */
    gicv3_cpuif_update(cs);
}
1158 
static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;
    /* APR bit index: the group priority scaled by implemented bits */
    int aprbit = prio >> (8 - cs->prebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;
    bool nmi = cs->hppi.nmi;

    if (nmi) {
        /* An active NMI is tracked via the dedicated NMI bit, not a prio bit */
        cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
    } else {
        cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit);
    }

    if (irq < GIC_INTERNAL) {
        /* SGI/PPI: active and pending state lives in the redistributor */
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else if (irq < GICV3_LPI_INTID_START) {
        /* SPI: active and pending state lives in the distributor */
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        /* LPI: just clear pending; no active bit is recorded here */
        gicv3_redist_lpi_pending(cs, irq, 0);
    }
}
1189 
static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        /* Group 1 interrupts only produce a special ID for EL3/Monitor */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}
1226 
static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (icc_no_enabled_hppi(cs)) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}
1262 
1263 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1264 {
1265     GICv3CPUState *cs = icc_cs_from_env(env);
1266     uint64_t intid;
1267 
1268     if (icv_access(env, HCR_FMO)) {
1269         return icv_iar_read(env, ri);
1270     }
1271 
1272     if (!icc_hppi_can_preempt(cs)) {
1273         intid = INTID_SPURIOUS;
1274     } else {
1275         intid = icc_hppir0_value(cs, env);
1276     }
1277 
1278     if (!gicv3_intid_is_special(intid)) {
1279         icc_activate_irq(cs, intid);
1280     }
1281 
1282     trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
1283     return intid;
1284 }
1285 
static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_IAR1_EL1: acknowledge the highest priority pending
     * Group 1 interrupt.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* IRQ is virtualized for this access: use the virtual IAR */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
            /*
             * With SCTLR_ELx.NMI set, IAR1 returns the NMI special ID
             * for a pending NMI instead of activating the interrupt.
             */
            intid = INTID_NMI;
        } else {
            icc_activate_irq(cs, intid);
        }
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
1313 
1314 static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1315 {
1316     GICv3CPUState *cs = icc_cs_from_env(env);
1317     uint64_t intid;
1318 
1319     if (icv_access(env, HCR_IMO)) {
1320         return icv_nmiar1_read(env, ri);
1321     }
1322 
1323     if (!icc_hppi_can_preempt(cs)) {
1324         intid = INTID_SPURIOUS;
1325     } else {
1326         intid = icc_hppir1_value(cs, env);
1327     }
1328 
1329     if (!gicv3_intid_is_special(intid)) {
1330         if (!cs->hppi.nmi) {
1331             intid = INTID_SPURIOUS;
1332         } else {
1333             icc_activate_irq(cs, intid);
1334         }
1335     }
1336 
1337     trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
1338     return intid;
1339 }
1340 
static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
            /* An active NMI is tracked by the AP1R0 NMI bit: clear that */
            *papr &= (~ICC_AP1R_EL1_NMI);
            break;
        }

        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
1381 
1382 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1383 {
1384     /* Return true if we should split priority drop and interrupt
1385      * deactivation, ie whether the relevant EOIMode bit is set.
1386      */
1387     if (arm_is_el3_or_mon(env)) {
1388         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
1389     }
1390     if (arm_is_secure_below_el3(env)) {
1391         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
1392     } else {
1393         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
1394     }
1395 }
1396 
static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    if (cs->nmi_support) {
        /* An active NMI (tracked in AP1R0) outranks any priority bit */
        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1;
        }
        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
            return GICV3_G1NS;
        }
    }

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        /* ctz32() returns 32 for a zero word, so empty groups lose ties */
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }
    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}
1436 
1437 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
1438 {
1439     if (irq < GIC_INTERNAL) {
1440         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
1441         gicv3_redist_update(cs);
1442     } else {
1443         gicv3_gicd_active_clear(cs->gic, irq);
1444         gicv3_update(cs->gic, irq, 1);
1445     }
1446 }
1447 
1448 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
1449 {
1450     /* Return true if we should split priority drop and interrupt
1451      * deactivation, ie whether the virtual EOIMode bit is set.
1452      */
1453     return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
1454 }
1455 
1456 static int icv_find_active(GICv3CPUState *cs, int irq)
1457 {
1458     /* Given an interrupt number for an active interrupt, return the index
1459      * of the corresponding list register, or -1 if there is no match.
1460      * Corresponds to FindActiveVirtualInterrupt pseudocode.
1461      */
1462     int i;
1463 
1464     for (i = 0; i < cs->num_list_regs; i++) {
1465         uint64_t lr = cs->ich_lr_el2[i];
1466 
1467         if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
1468             return i;
1469         }
1470     }
1471 
1472     return -1;
1473 }
1474 
1475 static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
1476 {
1477     /* Deactivate the interrupt in the specified list register index */
1478     uint64_t lr = cs->ich_lr_el2[idx];
1479 
1480     if (lr & ICH_LR_EL2_HW) {
1481         /* Deactivate the associated physical interrupt */
1482         int pirq = ich_lr_pintid(lr);
1483 
1484         if (pirq < INTID_SECURE) {
1485             icc_deactivate_irq(cs, pirq);
1486         }
1487     }
1488 
1489     /* Clear the 'active' part of the state, so ActivePending->Pending
1490      * and Active->Invalid.
1491      */
1492     lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
1493     cs->ich_lr_el2[idx] = lr;
1494 }
1495 
1496 static void icv_increment_eoicount(GICv3CPUState *cs)
1497 {
1498     /* Increment the EOICOUNT field in ICH_HCR_EL2 */
1499     int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1500                              ICH_HCR_EL2_EOICOUNT_LENGTH);
1501 
1502     cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
1503                                 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
1504 }
1505 
static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * If the active interrupt was an NMI (AP1R0 NMI bit), set *nmi
     * and return 0xff.
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
            /* Active virtual NMI: clear its dedicated bit, not a prio bit */
            *papr1 &= (~ICV_AP1R_EL1_NMI);
            *nmi = true;
            return 0xff;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }
    /* No active interrupt at all */
    return 0xff;
}
1551 
static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt (write to ICV_DIR_EL1) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int idx;
    int irq = value & 0xffffff;

    trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= GICV3_MAXIRQ) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icv_eoi_split(env, cs)) {
        /* DIR only has an effect when EOIMode splits drop and deactivate */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No list register matching this, so increment the EOI count
         * (might trigger a maintenance interrupt)
         */
        icv_increment_eoicount(cs);
    } else {
        icv_deactivate_irq(cs, idx);
    }

    gicv3_cpuif_virt_update(cs);
}
1584 
1585 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
1586                            uint64_t value)
1587 {
1588     /* End of Interrupt */
1589     GICv3CPUState *cs = icc_cs_from_env(env);
1590     int irq = value & 0xffffff;
1591     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
1592     int idx, dropprio;
1593     bool nmi = false;
1594 
1595     trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
1596                                gicv3_redist_affid(cs), value);
1597 
1598     if (gicv3_intid_is_special(irq)) {
1599         return;
1600     }
1601 
1602     /* We implement the IMPDEF choice of "drop priority before doing
1603      * error checks" (because that lets us avoid scanning the AP
1604      * registers twice).
1605      */
1606     dropprio = icv_drop_prio(cs, &nmi);
1607     if (dropprio == 0xff && !nmi) {
1608         /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
1609          * whether the list registers are checked in this
1610          * situation; we choose not to.
1611          */
1612         return;
1613     }
1614 
1615     idx = icv_find_active(cs, irq);
1616 
1617     if (idx < 0) {
1618         /*
1619          * No valid list register corresponding to EOI ID; if this is a vLPI
1620          * not in the list regs then do nothing; otherwise increment EOI count
1621          */
1622         if (irq < GICV3_LPI_INTID_START) {
1623             icv_increment_eoicount(cs);
1624         }
1625     } else {
1626         uint64_t lr = cs->ich_lr_el2[idx];
1627         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
1628         int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
1629         bool thisnmi = lr & ICH_LR_EL2_NMI;
1630 
1631         if (thisgrp == grp && (lr_gprio == dropprio || (thisnmi & nmi))) {
1632             if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
1633                 /*
1634                  * Priority drop and deactivate not split: deactivate irq now.
1635                  * LPIs always get their active state cleared immediately
1636                  * because no separate deactivate is expected.
1637                  */
1638                 icv_deactivate_irq(cs, idx);
1639             }
1640         }
1641     }
1642 
1643     gicv3_cpuif_virt_update(cs);
1644 }
1645 
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt (write to ICC_EOIR0_EL1 or ICC_EOIR1_EL1) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        /* Access is virtualized: forward to the virtual EOIR */
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    /* Validate that the register written matches the group of the
     * highest priority active interrupt, and that the current
     * security state is allowed to complete it.
     */
    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        if (!is_eoir0) {
            return;
        }
        /* NS cannot complete a Secure G0 interrupt on a security-enabled GIC */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        if (is_eoir0) {
            return;
        }
        /* Secure Group 1 can only be completed from Secure state */
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        if (is_eoir0) {
            return;
        }
        /* NS Group 1 cannot be completed from Secure EL1 */
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}
1716 
1717 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
1718 {
1719     GICv3CPUState *cs = icc_cs_from_env(env);
1720     uint64_t value;
1721 
1722     if (icv_access(env, HCR_FMO)) {
1723         return icv_hppir_read(env, ri);
1724     }
1725 
1726     value = icc_hppir0_value(cs, env);
1727     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
1728     return value;
1729 }
1730 
1731 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
1732 {
1733     GICv3CPUState *cs = icc_cs_from_env(env);
1734     uint64_t value;
1735 
1736     if (icv_access(env, HCR_IMO)) {
1737         return icv_hppir_read(env, ri);
1738     }
1739 
1740     value = icc_hppir1_value(cs, env);
1741     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
1742     return value;
1743 }
1744 
static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_BPR0_EL1 or ICC_BPR1_EL1 (binary point register) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is virtualized: use the virtual BPR */
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        /* Banked register: NS accesses see the NS copy */
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        /* Saturating increment: BPR0 + 1, capped at 7 */
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
1785 
static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_BPR0_EL1 or ICC_BPR1_EL1 (binary point register) */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is virtualized: forward to the virtual BPR */
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        /* Banked register: NS accesses modify the NS copy */
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    /* Clamp to the minimum BPR supported for this group */
    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    /* A BPR change alters group priority masking, so re-evaluate */
    gicv3_cpuif_update(cs);
}
1827 
1828 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1829 {
1830     GICv3CPUState *cs = icc_cs_from_env(env);
1831     uint64_t value;
1832 
1833     int regno = ri->opc2 & 3;
1834     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
1835 
1836     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
1837         return icv_ap_read(env, ri);
1838     }
1839 
1840     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
1841         grp = GICV3_G1NS;
1842     }
1843 
1844     value = cs->icc_apr[grp][regno];
1845 
1846     trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
1847     return value;
1848 }
1849 
static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Write ICC_AP0R<n>_EL1 / ICC_AP1R<n>_EL1 (active priority registers) */
    GICv3CPUState *cs = icc_cs_from_env(env);

    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        /* Access is virtualized: forward to the virtual AP register */
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        /* Banked register: NS accesses modify the NS copy */
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    if (cs->nmi_support) {
        /* The NMI bit is writable only when NMI support is implemented */
        cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
    } else {
        cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    }
    /* APR changes alter the running priority, so re-evaluate */
    gicv3_cpuif_update(cs);
}
1885 
static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    /* Virtualized accesses are handled by the ICV register instead */
    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    /* Writes are ignored unless priority-drop and deactivate are split */
    if (!icc_eoi_split(env, cs)) {
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        /* EL3 may deactivate any interrupt */
        break;
    case 2:
        /* EL2 may deactivate only interrupts not routed to EL3 */
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        /* EL1 may deactivate only interrupts not routed to EL2 or EL3 */
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}
1968 
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_RPR_EL1: the running (highest active) priority */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t prio;

    /* Virtualized accesses read the virtual running priority instead */
    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    if (cs->nmi_support) {
        /* NMI info is reported in the high bits of RPR */
        if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            /* NS view: only the NS Group 1 NMI state is visible (as NMI) */
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        } else {
            /* Secure (or no-EL3) view: report both security states */
            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NSNMI;
            }
            if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
                prio |= ICC_RPR_EL1_NMI;
            }
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
2011 
/*
 * Common backend for the ICC_{A}SGI[01]R write functions: decode the
 * register value and forward the SGI to every matching target CPU.
 *
 * Field layout of 'value' (as decoded below):
 *  bits [55:48]/[39:32]/[23:16] : Aff3/Aff2/Aff1 of the target cluster
 *  bit  [40]                    : IRM (1 = broadcast to all CPUs but self)
 *  bits [27:24]                 : SGI interrupt number
 *  bits [15:0]                  : target list of Aff0 values 0..15
 */
static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            /* GICR_TYPER bits [63:40] are this CPU's Aff3.Aff2.Aff1 */
            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}
2064 
2065 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2066                            uint64_t value)
2067 {
2068     /* Generate Secure Group 0 SGI. */
2069     GICv3CPUState *cs = icc_cs_from_env(env);
2070     bool ns = !arm_is_secure(env);
2071 
2072     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
2073 }
2074 
2075 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2076                            uint64_t value)
2077 {
2078     /* Generate Group 1 SGI for the current Security state */
2079     GICv3CPUState *cs = icc_cs_from_env(env);
2080     int grp;
2081     bool ns = !arm_is_secure(env);
2082 
2083     grp = ns ? GICV3_G1NS : GICV3_G1;
2084     icc_generate_sgi(env, cs, value, grp, ns);
2085 }
2086 
2087 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
2088                              uint64_t value)
2089 {
2090     /* Generate Group 1 SGI for the Security state that is not
2091      * the current state
2092      */
2093     GICv3CPUState *cs = icc_cs_from_env(env);
2094     int grp;
2095     bool ns = !arm_is_secure(env);
2096 
2097     grp = ns ? GICV3_G1 : GICV3_G1NS;
2098     icc_generate_sgi(env, cs, value, grp, ns);
2099 }
2100 
2101 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
2102 {
2103     GICv3CPUState *cs = icc_cs_from_env(env);
2104     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2105     uint64_t value;
2106 
2107     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
2108         return icv_igrpen_read(env, ri);
2109     }
2110 
2111     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
2112         grp = GICV3_G1NS;
2113     }
2114 
2115     value = cs->icc_igrpen[grp];
2116     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
2117                                 gicv3_redist_affid(cs), value);
2118     return value;
2119 }
2120 
2121 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
2122                              uint64_t value)
2123 {
2124     GICv3CPUState *cs = icc_cs_from_env(env);
2125     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
2126 
2127     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
2128         icv_igrpen_write(env, ri, value);
2129         return;
2130     }
2131 
2132     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
2133                                  gicv3_redist_affid(cs), value);
2134 
2135     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
2136         grp = GICV3_G1NS;
2137     }
2138 
2139     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
2140     gicv3_cpuif_update(cs);
2141 }
2142 
2143 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2144 {
2145     GICv3CPUState *cs = icc_cs_from_env(env);
2146     uint64_t value;
2147 
2148     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2149     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
2150     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
2151     return value;
2152 }
2153 
2154 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2155                                   uint64_t value)
2156 {
2157     GICv3CPUState *cs = icc_cs_from_env(env);
2158 
2159     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
2160 
2161     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
2162     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
2163     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
2164     gicv3_cpuif_update(cs);
2165 }
2166 
2167 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
2168 {
2169     GICv3CPUState *cs = icc_cs_from_env(env);
2170     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
2171     uint64_t value;
2172 
2173     if (icv_access(env, HCR_FMO | HCR_IMO)) {
2174         return icv_ctlr_read(env, ri);
2175     }
2176 
2177     value = cs->icc_ctlr_el1[bank];
2178     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
2179     return value;
2180 }
2181 
2182 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2183                                uint64_t value)
2184 {
2185     GICv3CPUState *cs = icc_cs_from_env(env);
2186     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
2187     uint64_t mask;
2188 
2189     if (icv_access(env, HCR_FMO | HCR_IMO)) {
2190         icv_ctlr_write(env, ri, value);
2191         return;
2192     }
2193 
2194     trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
2195 
2196     /* Only CBPR and EOIMODE can be RW;
2197      * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
2198      * the asseciated priority-based routing of them);
2199      * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
2200      */
2201     if (arm_feature(env, ARM_FEATURE_EL3) &&
2202         ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
2203         mask = ICC_CTLR_EL1_EOIMODE;
2204     } else {
2205         mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
2206     }
2207 
2208     cs->icc_ctlr_el1[bank] &= ~mask;
2209     cs->icc_ctlr_el1[bank] |= (value & mask);
2210     gicv3_cpuif_update(cs);
2211 }
2212 
2213 
2214 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
2215 {
2216     GICv3CPUState *cs = icc_cs_from_env(env);
2217     uint64_t value;
2218 
2219     value = cs->icc_ctlr_el3;
2220     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2221         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
2222     }
2223     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2224         value |= ICC_CTLR_EL3_CBPR_EL1NS;
2225     }
2226     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
2227         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
2228     }
2229     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
2230         value |= ICC_CTLR_EL3_CBPR_EL1S;
2231     }
2232 
2233     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
2234     return value;
2235 }
2236 
2237 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2238                                uint64_t value)
2239 {
2240     GICv3CPUState *cs = icc_cs_from_env(env);
2241     uint64_t mask;
2242 
2243     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
2244 
2245     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
2246     cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2247     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
2248         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
2249     }
2250     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
2251         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
2252     }
2253 
2254     cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
2255     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
2256         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
2257     }
2258     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
2259         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
2260     }
2261 
2262     /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
2263     mask = ICC_CTLR_EL3_EOIMODE_EL3;
2264 
2265     cs->icc_ctlr_el3 &= ~mask;
2266     cs->icc_ctlr_el3 |= (value & mask);
2267     gicv3_cpuif_update(cs);
2268 }
2269 
2270 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
2271                                           const ARMCPRegInfo *ri, bool isread)
2272 {
2273     CPAccessResult r = CP_ACCESS_OK;
2274     GICv3CPUState *cs = icc_cs_from_env(env);
2275     int el = arm_current_el(env);
2276 
2277     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
2278         el == 1 && !arm_is_secure_below_el3(env)) {
2279         /* Takes priority over a possible EL3 trap */
2280         return CP_ACCESS_TRAP_EL2;
2281     }
2282 
2283     if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
2284         switch (el) {
2285         case 1:
2286             /* Note that arm_hcr_el2_eff takes secure state into account.  */
2287             if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
2288                 r = CP_ACCESS_TRAP_EL3;
2289             }
2290             break;
2291         case 2:
2292             r = CP_ACCESS_TRAP_EL3;
2293             break;
2294         case 3:
2295             if (!arm_is_el3_or_mon(env)) {
2296                 r = CP_ACCESS_TRAP_EL3;
2297             }
2298             break;
2299         default:
2300             g_assert_not_reached();
2301         }
2302     }
2303 
2304     return r;
2305 }
2306 
2307 static CPAccessResult gicv3_dir_access(CPUARMState *env,
2308                                        const ARMCPRegInfo *ri, bool isread)
2309 {
2310     GICv3CPUState *cs = icc_cs_from_env(env);
2311 
2312     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
2313         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
2314         /* Takes priority over a possible EL3 trap */
2315         return CP_ACCESS_TRAP_EL2;
2316     }
2317 
2318     return gicv3_irqfiq_access(env, ri, isread);
2319 }
2320 
2321 static CPAccessResult gicv3_sgi_access(CPUARMState *env,
2322                                        const ARMCPRegInfo *ri, bool isread)
2323 {
2324     if (arm_current_el(env) == 1 &&
2325         (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
2326         /* Takes priority over a possible EL3 trap */
2327         return CP_ACCESS_TRAP_EL2;
2328     }
2329 
2330     return gicv3_irqfiq_access(env, ri, isread);
2331 }
2332 
2333 static CPAccessResult gicv3_fiq_access(CPUARMState *env,
2334                                        const ARMCPRegInfo *ri, bool isread)
2335 {
2336     CPAccessResult r = CP_ACCESS_OK;
2337     GICv3CPUState *cs = icc_cs_from_env(env);
2338     int el = arm_current_el(env);
2339 
2340     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
2341         el == 1 && !arm_is_secure_below_el3(env)) {
2342         /* Takes priority over a possible EL3 trap */
2343         return CP_ACCESS_TRAP_EL2;
2344     }
2345 
2346     if (env->cp15.scr_el3 & SCR_FIQ) {
2347         switch (el) {
2348         case 1:
2349             if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
2350                 r = CP_ACCESS_TRAP_EL3;
2351             }
2352             break;
2353         case 2:
2354             r = CP_ACCESS_TRAP_EL3;
2355             break;
2356         case 3:
2357             if (!arm_is_el3_or_mon(env)) {
2358                 r = CP_ACCESS_TRAP_EL3;
2359             }
2360             break;
2361         default:
2362             g_assert_not_reached();
2363         }
2364     }
2365 
2366     return r;
2367 }
2368 
2369 static CPAccessResult gicv3_irq_access(CPUARMState *env,
2370                                        const ARMCPRegInfo *ri, bool isread)
2371 {
2372     CPAccessResult r = CP_ACCESS_OK;
2373     GICv3CPUState *cs = icc_cs_from_env(env);
2374     int el = arm_current_el(env);
2375 
2376     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
2377         el == 1 && !arm_is_secure_below_el3(env)) {
2378         /* Takes priority over a possible EL3 trap */
2379         return CP_ACCESS_TRAP_EL2;
2380     }
2381 
2382     if (env->cp15.scr_el3 & SCR_IRQ) {
2383         switch (el) {
2384         case 1:
2385             if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
2386                 r = CP_ACCESS_TRAP_EL3;
2387             }
2388             break;
2389         case 2:
2390             r = CP_ACCESS_TRAP_EL3;
2391             break;
2392         case 3:
2393             if (!arm_is_el3_or_mon(env)) {
2394                 r = CP_ACCESS_TRAP_EL3;
2395             }
2396             break;
2397         default:
2398             g_assert_not_reached();
2399         }
2400     }
2401 
2402     return r;
2403 }
2404 
/*
 * Reset the whole GICv3 CPU interface state (both the physical ICC
 * registers and the virtual-interface ICH registers). This is hung off
 * the ICC_PMR_EL1 reginfo entry rather than being split into one
 * resetfn per register.
 */
static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    /* A3V plus the configured ID and priority bit counts are RO fields */
    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    /* The BPRs reset to the minimum value for the configured pribits */
    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);

    /* Virtual interface (hypervisor control) state */
    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
2432 
/*
 * System register descriptions for the GICv3 CPU interface registers
 * that are always present when the CPU interface exists.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    /* Group 0 registers use the FIQ-based access check */
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    /* All the ICC_AP1R*_EL1 registers are banked */
    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_dir_access,
      .writefn = icc_dir_write,
    },
    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_rpr_read,
    },
    /*
     * The SGI generation registers have both an AArch64 system
     * register form and (below each) an AArch32 64-bit MCRR form.
     */
    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_SGI1R",
      .cp = 15, .opc1 = 0, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_ASGI1R",
      .cp = 15, .opc1 = 1, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_SGI0R",
      .cp = 15, .opc1 = 2, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    /* Group 1 registers use the IRQ-based access check */
    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_iar1_read,
    },
    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_irq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_hppir1_read,
    },
    /* This register is banked */
    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    /* This register is banked */
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_ctlr_el1_read,
      .writefn = icc_ctlr_el1_write,
    },
    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL1_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       * This register is banked but since it's constant we don't
       * need to do anything special.
       */
      .resetvalue = 0x7,
    },
    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    /* This register is banked */
    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL2_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_ctlr_el3_read,
      .writefn = icc_ctlr_el3_write,
    },
    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL3_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_igrpen1_el3_read,
      .writefn = icc_igrpen1_el3_write,
    },
};
2629 
/*
 * The additional ICC_AP0R1_EL1/ICC_AP1R1_EL1 active priority registers;
 * registered separately from the base set (presumably only when the
 * configured priority bits require a second APR — confirm in the
 * cpuif registration code, which is outside this file section).
 */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2646 
/*
 * The additional ICC_AP[01]R{2,3}_EL1 active priority registers;
 * registered separately from the base set (presumably only when the
 * configured priority bits require four APRs — confirm in the cpuif
 * registration code, which is outside this file section).
 */
static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};
2677 
/* Register present only when the GIC has NMI support (per the array name) */
static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
    { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_nmiar1_read,
    },
};
2686 
2687 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2688 {
2689     GICv3CPUState *cs = icc_cs_from_env(env);
2690     int regno = ri->opc2 & 3;
2691     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2692     uint64_t value;
2693 
2694     value = cs->ich_apr[grp][regno];
2695     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2696     return value;
2697 }
2698 
2699 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2700                          uint64_t value)
2701 {
2702     GICv3CPUState *cs = icc_cs_from_env(env);
2703     int regno = ri->opc2 & 3;
2704     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
2705 
2706     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
2707 
2708     if (cs->nmi_support) {
2709         cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
2710     } else {
2711         cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
2712     }
2713     gicv3_cpuif_virt_irq_fiq_update(cs);
2714 }
2715 
2716 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2717 {
2718     GICv3CPUState *cs = icc_cs_from_env(env);
2719     uint64_t value = cs->ich_hcr_el2;
2720 
2721     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
2722     return value;
2723 }
2724 
2725 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2726                           uint64_t value)
2727 {
2728     GICv3CPUState *cs = icc_cs_from_env(env);
2729 
2730     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
2731 
2732     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
2733         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
2734         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
2735         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
2736         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
2737 
2738     cs->ich_hcr_el2 = value;
2739     gicv3_cpuif_virt_update(cs);
2740 }
2741 
2742 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2743 {
2744     GICv3CPUState *cs = icc_cs_from_env(env);
2745     uint64_t value = cs->ich_vmcr_el2;
2746 
2747     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
2748     return value;
2749 }
2750 
/*
 * Write ICH_VMCR_EL2. Unimplemented bits are masked off, VFIQEN is
 * forced to 1 (treated as RES1 here), and the virtual BPR fields are
 * clamped to their minimum legal values before the virtual interface
 * state is re-evaluated.
 */
static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);

    /* Keep only the fields this implementation supports */
    value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
    /* VFIQEN always reads as 1 in this implementation */
    value |= ICH_VMCR_EL2_VFIQEN;

    cs->ich_vmcr_el2 = value;
    /* Enforce "writing BPRs to less than minimum sets them to the minimum"
     * by reading and writing back the fields.
     * (This must happen after ich_vmcr_el2 is updated, since the vbpr
     * helpers operate on the stored register value.)
     */
    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));

    gicv3_cpuif_virt_update(cs);
}
2772 
2773 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2774 {
2775     GICv3CPUState *cs = icc_cs_from_env(env);
2776     int regno = ri->opc2 | ((ri->crm & 1) << 3);
2777     uint64_t value;
2778 
2779     /* This read function handles all of:
2780      * 64-bit reads of the whole LR
2781      * 32-bit reads of the low half of the LR
2782      * 32-bit reads of the high half of the LR
2783      */
2784     if (ri->state == ARM_CP_STATE_AA32) {
2785         if (ri->crm >= 14) {
2786             value = extract64(cs->ich_lr_el2[regno], 32, 32);
2787             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
2788         } else {
2789             value = extract64(cs->ich_lr_el2[regno], 0, 32);
2790             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
2791         }
2792     } else {
2793         value = cs->ich_lr_el2[regno];
2794         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
2795     }
2796 
2797     return value;
2798 }
2799 
/*
 * Write a List Register. For AArch32 accesses the incoming 32-bit
 * value is merged into the appropriate half of the stored 64-bit LR
 * before the RES0 enforcement below is applied to the full value.
 */
static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    /* Register index: opc2 gives bits [2:0], low bit of crm gives bit 3 */
    int regno = ri->opc2 | ((ri->crm & 1) << 3);

    /* This write function handles all of:
     * 64-bit writes to the whole LR
     * 32-bit writes to the low half of the LR
     * 32-bit writes to the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            /* ICH_LRCn: merge into the high half, keeping the low half */
            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
        } else {
            /* ICH_LRn: merge into the low half, keeping the high half */
            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
        }
    } else {
        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
    }

    /* Enforce RES0 bits in priority field */
    if (cs->vpribits < 8) {
        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
                          8 - cs->vpribits, 0);
    }

    /* Enforce RES0 bit in NMI field when FEAT_GICv3_NMI is not implemented */
    if (!cs->nmi_support) {
        value &= ~ICH_LR_EL2_NMI;
    }

    cs->ich_lr_el2[regno] = value;
    gicv3_cpuif_virt_update(cs);
}
2837 
2838 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2839 {
2840     GICv3CPUState *cs = icc_cs_from_env(env);
2841     uint64_t value;
2842 
2843     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
2844         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
2845         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
2846         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
2847         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
2848 
2849     if (cs->gic->revision < 4) {
2850         value |= ICH_VTR_EL2_NV4;
2851     }
2852 
2853     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
2854     return value;
2855 }
2856 
2857 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2858 {
2859     GICv3CPUState *cs = icc_cs_from_env(env);
2860     uint64_t value = maintenance_interrupt_state(cs);
2861 
2862     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
2863     return value;
2864 }
2865 
2866 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2867 {
2868     GICv3CPUState *cs = icc_cs_from_env(env);
2869     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
2870 
2871     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
2872     return value;
2873 }
2874 
2875 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2876 {
2877     GICv3CPUState *cs = icc_cs_from_env(env);
2878     uint64_t value = 0;
2879     int i;
2880 
2881     for (i = 0; i < cs->num_list_regs; i++) {
2882         uint64_t lr = cs->ich_lr_el2[i];
2883 
2884         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
2885             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
2886             value |= (1 << i);
2887         }
2888     }
2889 
2890     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
2891     return value;
2892 }
2893 
/*
 * EL2 virtual-interface control registers (ICH_*_EL2), registered only
 * when the CPU implements EL2 (see gicv3_init_cpuif). The
 * nv2_redirect_offset values give each register's offset for FEAT_NV2
 * memory redirection.
 */
static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x480,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c0,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4c8,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
};
2952 
/*
 * ICH_AP{0,1}R1_EL2: virtual active-priority registers 1. Only
 * registered (see gicv3_init_cpuif) when vprebits >= 6.
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x488,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4a8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
2971 
/*
 * ICH_AP{0,1}R{2,3}_EL2: virtual active-priority registers 2 and 3.
 * Only registered (see gicv3_init_cpuif) when vprebits == 7.
 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x490,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x498,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b0,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .nv2_redirect_offset = 0x4b8,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};
3006 
/*
 * Hook called on every CPU exception-level change; re-evaluates both
 * the physical and the virtual IRQ/FIQ output lines for this CPU's
 * interface (opaque is the GICv3CPUState registered in
 * gicv3_init_cpuif).
 */
static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
    GICv3CPUState *cs = opaque;

    gicv3_cpuif_update(cs);
    /*
     * Because vLPIs are only pending in NonSecure state,
     * an EL change can change the VIRQ/VFIQ status (but
     * cannot affect the maintenance interrupt state)
     */
    gicv3_cpuif_virt_irq_fiq_update(cs);
}
3019 
/*
 * Register the GICv3 CPU interface system registers with every CPU.
 * Called from the GICv3 device realize function. Which optional
 * register groups get defined depends on per-CPU properties
 * (priority/preemption bit counts, EL2 presence, NMI support).
 */
void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /*
         * If the CPU doesn't define a GICv3 configuration, probably because
         * in real hardware it doesn't have one, then we use default values
         * matching the one used by most Arm CPUs. This applies to:
         *  cpu->gic_num_lrs
         *  cpu->gic_vpribits
         *  cpu->gic_vprebits
         *  cpu->gic_pribits
         */

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         *
         * These CP regs callbacks can be called from either TCG or HVF code.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /*
         * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
         * implement FEAT_GICv3_NMI, which is the CPU interface part
         * of NMI support. This is distinct from whether the GIC proper
         * (redistributors and distributor) have NMI support. In QEMU
         * that is a property of the GIC device in s->nmi_support;
         * cs->nmi_support indicates the CPU interface's support.
         */
        if (cpu_isar_feature(aa64_nmi, cpu)) {
            cs->nmi_support = true;
            define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
        }

        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
         * of 8 priority bits regardless of what the CPU really has.
         */
        if (s->force_8bit_prio) {
            cs->pribits = 8;
        } else {
            /* Default to 5 priority bits if the CPU leaves it unset */
            cs->pribits = cpu->gic_pribits ?: 5;
        }

        /*
         * The GICv3 has separate ID register fields for virtual priority
         * and preemption bit values, but only a single ID register field
         * for the physical priority bits. The preemption bit count is
         * always the same as the priority bit count, except that 8 bits
         * of priority means 7 preemption bits. We precalculate the
         * preemption bits because it simplifies the code and makes the
         * parallels between the virtual and physical bits of the GIC
         * a bit clearer.
         */
        cs->prebits = cs->pribits;
        if (cs->prebits == 8) {
            cs->prebits--;
        }
        /*
         * Check that CPU code defining pribits didn't violate
         * architectural constraints our implementation relies on.
         */
        g_assert(cs->pribits >= 4 && cs->pribits <= 8);

        /*
         * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
         * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
         */
        if (cs->prebits >= 6) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
        }
        if (cs->prebits == 7) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
        }

        /* The virtual interface registers only exist if the CPU has EL2 */
        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
            int j;

            cs->num_list_regs = cpu->gic_num_lrs ?: 4;
            cs->vpribits = cpu->gic_vpribits ?: 5;
            cs->vprebits = cpu->gic_vprebits ?: 5;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            /* List registers are defined individually since their count
             * is a per-CPU property.
             */
            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .nv2_redirect_offset = 0x400 + 8 * j,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                };
                define_arm_cp_regs(cpu, lr_regset);
            }
            /* Extra virtual AP registers, like the physical ones above,
             * depend on the preemption bit count.
             */
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        if (tcg_enabled() || qtest_enabled()) {
            /*
             * We can only trap EL changes with TCG. However the GIC interrupt
             * state only changes on EL changes involving EL2 or EL3, so for
             * the non-TCG case this is OK, as EL2 and EL3 can't exist.
             */
            arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
        } else {
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
        }
    }
}
3171