xref: /qemu/hw/intc/arm_gicv3_its.c (revision 7eb54267f243a336deaf3c806a5b5422365ee861)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26                      ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
27 
/* Class struct: only adds a chained reset hook to the common ITS class */
struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent class's reset handler, called before/by our own reset */
    void (*parent_reset)(DeviceState *dev);
};
32 
33 /*
34  * This is an internal enum used to distinguish between LPI triggered
35  * via command queue and LPI triggered via gits_translater write.
36  */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command: deassert the LPI */
    DISCARD = 2,   /* ITS DISCARD command: deassert and unmap the ITE */
    INTERRUPT = 3, /* ITS INT command: assert the LPI */
} ItsCmdType;
43 
/* In-memory representation of a Device Table Entry (read/written by
 * get_dte()/update_dte(); on-disk format is the IMPDEF DTE layout).
 */
typedef struct DTEntry {
    bool valid;
    unsigned size;     /* number of EventID bits minus one (see 1 << (size + 1)) */
    uint64_t ittaddr;  /* guest address of this device's Interrupt Translation Table */
} DTEntry;

/* In-memory representation of a Collection Table Entry */
typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;   /* target redistributor; used as a CPU index here */
} CTEntry;

/* In-memory representation of an Interrupt Translation table Entry */
typedef struct ITEntry {
    bool valid;
    int inttype;       /* ITE_INTTYPE_* value; only PHYSICAL is handled here */
    uint32_t intid;    /* the LPI INTID this event translates to */
    uint32_t doorbell;
    uint32_t icid;     /* collection this event is routed through */
    uint32_t vpeid;
} ITEntry;
63 
64 
65 /*
66  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
67  * if a command parameter is not correct. These include both "stall
68  * processing of the command queue" and "ignore this command, and
69  * keep processing the queue". In our implementation we choose that
70  * memory transaction errors reading the command packet provoke a
71  * stall, but errors in parameters cause us to ignore the command
72  * and continue processing.
73  * The process_* functions which handle individual ITS commands all
74  * return an ItsCmdResult which tells process_cmdq() whether it should
75  * stall or keep going.
76  */
typedef enum ItsCmdResult {
    CMD_STALL = 0,    /* stop processing the command queue (sets STALLED) */
    CMD_CONTINUE = 1, /* advance CREADR and process the next command */
} ItsCmdResult;
81 
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
83 {
84     uint64_t result = 0;
85 
86     switch (page_sz) {
87     case GITS_PAGE_SIZE_4K:
88     case GITS_PAGE_SIZE_16K:
89         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90         break;
91 
92     case GITS_PAGE_SIZE_64K:
93         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95         break;
96 
97     default:
98         break;
99     }
100     return result;
101 }
102 
static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /*
     * Two level table: find which L1 entry covers @idx.
     * NOTE(review): the divisor here is (page_sz / L1TABLE_ENTRY_SIZE),
     * but the modulo below uses (page_sz / entry_sz); these agree only
     * when entry_sz == L1TABLE_ENTRY_SIZE — confirm for all table types.
     */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        /* Memory error reading the L1 entry */
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        /* L1 entry not valid: no backing L2 page, *res stays MEMTX_OK */
        return -1;
    }

    /* Address of the entry within the L2 page the L1 entry points at */
    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
147 
148 /*
149  * Read the Collection Table entry at index @icid. On success (including
150  * successfully determining that there is no valid CTE for this index),
151  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152  * If there is an error reading memory then we return the error code.
153  */
154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
155 {
156     AddressSpace *as = &s->gicv3->dma_as;
157     MemTxResult res = MEMTX_OK;
158     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
159     uint64_t cteval;
160 
161     if (entry_addr == -1) {
162         /* No L2 table entry, i.e. no valid CTE, or a memory error */
163         cte->valid = false;
164         return res;
165     }
166 
167     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
168     if (res != MEMTX_OK) {
169         return res;
170     }
171     cte->valid = FIELD_EX64(cteval, CTE, VALID);
172     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
173     return MEMTX_OK;
174 }
175 
176 /*
177  * Update the Interrupt Table entry at index @evinted in the table specified
178  * by the dte @dte. Returns true on success, false if there was a memory
179  * access error.
180  */
181 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
182                        const ITEntry *ite)
183 {
184     AddressSpace *as = &s->gicv3->dma_as;
185     MemTxResult res = MEMTX_OK;
186     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
187     uint64_t itel = 0;
188     uint32_t iteh = 0;
189 
190     if (ite->valid) {
191         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
192         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
193         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
194         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
195         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
196         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
197     }
198 
199     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
200     if (res != MEMTX_OK) {
201         return false;
202     }
203     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
204     return res == MEMTX_OK;
205 }
206 
207 /*
208  * Read the Interrupt Table entry at index @eventid from the table specified
209  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
210  * struct @ite accordingly. If there is an error reading memory then we return
211  * the error code.
212  */
213 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
214                            const DTEntry *dte, ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     uint64_t itel;
219     uint32_t iteh;
220     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
221 
222     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
223     if (res != MEMTX_OK) {
224         return res;
225     }
226 
227     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
228     if (res != MEMTX_OK) {
229         return res;
230     }
231 
232     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
233     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
234     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
235     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
236     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
237     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
238     return MEMTX_OK;
239 }
240 
241 /*
242  * Read the Device Table entry at index @devid. On success (including
243  * successfully determining that there is no valid DTE for this index),
244  * we return MEMTX_OK and populate the DTEntry struct accordingly.
245  * If there is an error reading memory then we return the error code.
246  */
247 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
248 {
249     MemTxResult res = MEMTX_OK;
250     AddressSpace *as = &s->gicv3->dma_as;
251     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
252     uint64_t dteval;
253 
254     if (entry_addr == -1) {
255         /* No L2 table entry, i.e. no valid DTE, or a memory error */
256         dte->valid = false;
257         return res;
258     }
259     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
260     if (res != MEMTX_OK) {
261         return res;
262     }
263     dte->valid = FIELD_EX64(dteval, DTE, VALID);
264     dte->size = FIELD_EX64(dteval, DTE, SIZE);
265     /* DTE word field stores bits [51:8] of the ITT address */
266     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
267     return MEMTX_OK;
268 }
269 
270 /*
271  * This function handles the processing of following commands based on
272  * the ItsCmdType parameter passed:-
273  * 1. triggering of lpi interrupt translation via ITS INT command
274  * 2. triggering of lpi interrupt translation via gits_translater register
275  * 3. handling of ITS CLEAR command
276  * 4. handling of ITS DISCARD command
277  */
278 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
279                                        uint32_t eventid, ItsCmdType cmd)
280 {
281     uint64_t num_eventids;
282     DTEntry dte;
283     CTEntry cte;
284     ITEntry ite;
285 
286     if (devid >= s->dt.num_entries) {
287         qemu_log_mask(LOG_GUEST_ERROR,
288                       "%s: invalid command attributes: devid %d>=%d",
289                       __func__, devid, s->dt.num_entries);
290         return CMD_CONTINUE;
291     }
292 
293     if (get_dte(s, devid, &dte) != MEMTX_OK) {
294         return CMD_STALL;
295     }
296     if (!dte.valid) {
297         qemu_log_mask(LOG_GUEST_ERROR,
298                       "%s: invalid command attributes: "
299                       "invalid dte for %d\n", __func__, devid);
300         return CMD_CONTINUE;
301     }
302 
303     num_eventids = 1ULL << (dte.size + 1);
304     if (eventid >= num_eventids) {
305         qemu_log_mask(LOG_GUEST_ERROR,
306                       "%s: invalid command attributes: eventid %d >= %"
307                       PRId64 "\n",
308                       __func__, eventid, num_eventids);
309         return CMD_CONTINUE;
310     }
311 
312     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
313         return CMD_STALL;
314     }
315 
316     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
317         qemu_log_mask(LOG_GUEST_ERROR,
318                       "%s: invalid command attributes: invalid ITE\n",
319                       __func__);
320         return CMD_CONTINUE;
321     }
322 
323     if (ite.icid >= s->ct.num_entries) {
324         qemu_log_mask(LOG_GUEST_ERROR,
325                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
326                       __func__, ite.icid);
327         return CMD_CONTINUE;
328     }
329 
330     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
331         return CMD_STALL;
332     }
333     if (!cte.valid) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: invalid CTE\n",
336                       __func__);
337         return CMD_CONTINUE;
338     }
339 
340     /*
341      * Current implementation only supports rdbase == procnum
342      * Hence rdbase physical address is ignored
343      */
344     if (cte.rdbase >= s->gicv3->num_cpu) {
345         return CMD_CONTINUE;
346     }
347 
348     if ((cmd == CLEAR) || (cmd == DISCARD)) {
349         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
350     } else {
351         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
352     }
353 
354     if (cmd == DISCARD) {
355         ITEntry ite = {};
356         /* remove mapping from interrupt translation table */
357         ite.valid = false;
358         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
359     }
360     return CMD_CONTINUE;
361 }
362 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
363                                     ItsCmdType cmd)
364 {
365     uint32_t devid, eventid;
366 
367     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
368     eventid = cmdpkt[1] & EVENTID_MASK;
369     return do_process_its_cmd(s, devid, eventid, cmd);
370 }
371 
372 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
373                                   bool ignore_pInt)
374 {
375     uint32_t devid, eventid;
376     uint32_t pIntid = 0;
377     uint64_t num_eventids;
378     uint32_t num_intids;
379     uint16_t icid = 0;
380     DTEntry dte;
381     ITEntry ite;
382 
383     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
384     eventid = cmdpkt[1] & EVENTID_MASK;
385 
386     if (ignore_pInt) {
387         pIntid = eventid;
388     } else {
389         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
390     }
391 
392     icid = cmdpkt[2] & ICID_MASK;
393 
394     if (devid >= s->dt.num_entries) {
395         qemu_log_mask(LOG_GUEST_ERROR,
396                       "%s: invalid command attributes: devid %d>=%d",
397                       __func__, devid, s->dt.num_entries);
398         return CMD_CONTINUE;
399     }
400 
401     if (get_dte(s, devid, &dte) != MEMTX_OK) {
402         return CMD_STALL;
403     }
404     num_eventids = 1ULL << (dte.size + 1);
405     num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
406 
407     if ((icid >= s->ct.num_entries)
408             || !dte.valid || (eventid >= num_eventids) ||
409             (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
410              (pIntid != INTID_SPURIOUS))) {
411         qemu_log_mask(LOG_GUEST_ERROR,
412                       "%s: invalid command attributes "
413                       "icid %d or eventid %d or pIntid %d or"
414                       "unmapped dte %d\n", __func__, icid, eventid,
415                       pIntid, dte.valid);
416         /*
417          * in this implementation, in case of error
418          * we ignore this command and move onto the next
419          * command in the queue
420          */
421         return CMD_CONTINUE;
422     }
423 
424     /* add ite entry to interrupt translation table */
425     ite.valid = true;
426     ite.inttype = ITE_INTTYPE_PHYSICAL;
427     ite.intid = pIntid;
428     ite.icid = icid;
429     ite.doorbell = INTID_SPURIOUS;
430     ite.vpeid = 0;
431     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
432 }
433 
434 /*
435  * Update the Collection Table entry for @icid to @cte. Returns true
436  * on success, false if there was a memory access error.
437  */
438 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
439 {
440     AddressSpace *as = &s->gicv3->dma_as;
441     uint64_t entry_addr;
442     uint64_t cteval = 0;
443     MemTxResult res = MEMTX_OK;
444 
445     if (!s->ct.valid) {
446         return true;
447     }
448 
449     if (cte->valid) {
450         /* add mapping entry to collection table */
451         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
452         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
453     }
454 
455     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
456     if (res != MEMTX_OK) {
457         /* memory access error: stall */
458         return false;
459     }
460     if (entry_addr == -1) {
461         /* No L2 table for this index: discard write and continue */
462         return true;
463     }
464 
465     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
466     return res == MEMTX_OK;
467 }
468 
469 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
470 {
471     uint16_t icid;
472     CTEntry cte;
473 
474     icid = cmdpkt[2] & ICID_MASK;
475 
476     cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
477     cte.rdbase &= RDBASE_PROCNUM_MASK;
478 
479     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
480 
481     if ((icid >= s->ct.num_entries) || (cte.rdbase >= s->gicv3->num_cpu)) {
482         qemu_log_mask(LOG_GUEST_ERROR,
483                       "ITS MAPC: invalid collection table attributes "
484                       "icid %d rdbase %u\n",  icid, cte.rdbase);
485         /*
486          * in this implementation, in case of error
487          * we ignore this command and move onto the next
488          * command in the queue
489          */
490         return CMD_CONTINUE;
491     }
492 
493     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
494 }
495 
496 /*
497  * Update the Device Table entry for @devid to @dte. Returns true
498  * on success, false if there was a memory access error.
499  */
500 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
501 {
502     AddressSpace *as = &s->gicv3->dma_as;
503     uint64_t entry_addr;
504     uint64_t dteval = 0;
505     MemTxResult res = MEMTX_OK;
506 
507     if (s->dt.valid) {
508         if (dte->valid) {
509             /* add mapping entry to device table */
510             dteval = FIELD_DP64(dteval, DTE, VALID, 1);
511             dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
512             dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
513         }
514     } else {
515         return true;
516     }
517 
518     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
519     if (res != MEMTX_OK) {
520         /* memory access error: stall */
521         return false;
522     }
523     if (entry_addr == -1) {
524         /* No L2 table for this index: discard write and continue */
525         return true;
526     }
527     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
528     return res == MEMTX_OK;
529 }
530 
531 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
532 {
533     uint32_t devid;
534     DTEntry dte;
535 
536     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
537     dte.size = cmdpkt[1] & SIZE_MASK;
538     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
539     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
540 
541     if ((devid >= s->dt.num_entries) ||
542         (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
543         qemu_log_mask(LOG_GUEST_ERROR,
544                       "ITS MAPD: invalid device table attributes "
545                       "devid %d or size %d\n", devid, dte.size);
546         /*
547          * in this implementation, in case of error
548          * we ignore this command and move onto the next
549          * command in the queue
550          */
551         return CMD_CONTINUE;
552     }
553 
554     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
555 }
556 
557 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
558 {
559     uint64_t rd1, rd2;
560 
561     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
562     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
563 
564     if (rd1 >= s->gicv3->num_cpu) {
565         qemu_log_mask(LOG_GUEST_ERROR,
566                       "%s: RDBASE1 %" PRId64
567                       " out of range (must be less than %d)\n",
568                       __func__, rd1, s->gicv3->num_cpu);
569         return CMD_CONTINUE;
570     }
571     if (rd2 >= s->gicv3->num_cpu) {
572         qemu_log_mask(LOG_GUEST_ERROR,
573                       "%s: RDBASE2 %" PRId64
574                       " out of range (must be less than %d)\n",
575                       __func__, rd2, s->gicv3->num_cpu);
576         return CMD_CONTINUE;
577     }
578 
579     if (rd1 == rd2) {
580         /* Move to same target must succeed as a no-op */
581         return CMD_CONTINUE;
582     }
583 
584     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
585     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
586 
587     return CMD_CONTINUE;
588 }
589 
/*
 * Process a MOVI command: re-route the LPI identified by
 * (devid, eventid) from its current collection to @new_icid,
 * transferring any pending state between the two redistributors,
 * and rewrite the ITE with the new ICID.
 * Returns CMD_STALL only on memory access errors; parameter errors
 * are logged and the command is ignored (CMD_CONTINUE).
 */
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    /* DTE size field encodes (number of EventID bits - 1) */
    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (old_ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (new_icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* Look up both the source and destination collections */
    if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!old_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for old ICID 0x%x\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!new_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for new ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* rdbase is used as a CPU index, so both targets must be in range */
    if (old_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, old_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (new_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, new_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}
701 
/*
 * Drain the ITS command queue from CREADR up to CWRITER.
 * The current implementation blocks until all pending commands are
 * processed (or the queue stalls); commands are not processed
 * asynchronously.
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    /* Nothing to do unless the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    /* Process one 32-byte command packet per iteration until caught up */
    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        /*
         * Map the whole command packet from guest memory; a failure
         * to read the command is a stall condition (per this
         * implementation's CONSTRAINED UNPREDICTABLE choice), so set
         * GITS_CREADR.STALLED and stop.
         */
        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        /* Copy the packet out as host-endian 64-bit words */
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        default:
            /* Unknown commands are silently ignored */
            break;
        }
        if (result == CMD_CONTINUE) {
            /* Advance CREADR, wrapping at the end of the queue */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
832 
/*
 * This function extracts the ITS Device and Collection table specific
 * parameters (like base_addr, size etc) from GITS_BASER register.
 * It is called during ITS enable and also during post_load migration
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            /* Unimplemented/unconfigured GITS_BASER<n>: skip */
            continue;
        }

        /* Decode the 2-bit page-size field into a byte count */
        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Value 3 is reserved and behaves as 64K per the field encoding */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            /* Field is 2 bits wide, so only 0..3 are possible */
            g_assert_not_reached();
        }

        /* SIZE field encodes (number of pages - 1) */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        td->valid = FIELD_EX64(value, GITS_BASER, VALID);
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         */
        if (!td->valid) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: capacity is simply total bytes / entry size */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /* Two-level: L1 entries times entries per L2 page */
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        /* Never advertise more entries than the ID space can index */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
930 
931 static void extract_cmdq_params(GICv3ITSState *s)
932 {
933     uint16_t num_pages = 0;
934     uint64_t value = s->cbaser;
935 
936     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
937 
938     memset(&s->cq, 0 , sizeof(s->cq));
939     s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
940 
941     if (s->cq.valid) {
942         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
943                              GITS_CMDQ_ENTRY_SIZE;
944         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
945         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
946     }
947 }
948 
949 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
950                                               uint64_t *data, unsigned size,
951                                               MemTxAttrs attrs)
952 {
953     /*
954      * GITS_TRANSLATER is write-only, and all other addresses
955      * in the interrupt translation space frame are RES0.
956      */
957     *data = 0;
958     return MEMTX_OK;
959 }
960 
961 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
962                                                uint64_t data, unsigned size,
963                                                MemTxAttrs attrs)
964 {
965     GICv3ITSState *s = (GICv3ITSState *)opaque;
966     bool result = true;
967 
968     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
969 
970     switch (offset) {
971     case GITS_TRANSLATER:
972         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
973             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
974         }
975         break;
976     default:
977         break;
978     }
979 
980     if (result) {
981         return MEMTX_OK;
982     } else {
983         return MEMTX_ERROR;
984     }
985 }
986 
/*
 * Handle a 32-bit write to a register in the ITS control frame.
 * Returns true if the offset corresponds to an implemented register;
 * false makes the caller log a guest error and treat the access as
 * write-ignored.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /*
             * Enabling the ITS: re-derive the cached table and command
             * queue parameters, then drain any commands already queued.
             */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            /* Low half; changing CBASER also resets the read pointer */
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The RETRY bit is write-ignored */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* An advanced write pointer means the guest queued new commands */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                /* High half: keep the RO fields and the existing low word */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Low half: keep the RO fields and the existing high word */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1092 
1093 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1094                              uint64_t *data, MemTxAttrs attrs)
1095 {
1096     bool result = true;
1097     int index;
1098 
1099     switch (offset) {
1100     case GITS_CTLR:
1101         *data = s->ctlr;
1102         break;
1103     case GITS_IIDR:
1104         *data = gicv3_iidr();
1105         break;
1106     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1107         /* ID registers */
1108         *data = gicv3_idreg(offset - GITS_IDREGS);
1109         break;
1110     case GITS_TYPER:
1111         *data = extract64(s->typer, 0, 32);
1112         break;
1113     case GITS_TYPER + 4:
1114         *data = extract64(s->typer, 32, 32);
1115         break;
1116     case GITS_CBASER:
1117         *data = extract64(s->cbaser, 0, 32);
1118         break;
1119     case GITS_CBASER + 4:
1120         *data = extract64(s->cbaser, 32, 32);
1121         break;
1122     case GITS_CREADR:
1123         *data = extract64(s->creadr, 0, 32);
1124         break;
1125     case GITS_CREADR + 4:
1126         *data = extract64(s->creadr, 32, 32);
1127         break;
1128     case GITS_CWRITER:
1129         *data = extract64(s->cwriter, 0, 32);
1130         break;
1131     case GITS_CWRITER + 4:
1132         *data = extract64(s->cwriter, 32, 32);
1133         break;
1134     case GITS_BASER ... GITS_BASER + 0x3f:
1135         index = (offset - GITS_BASER) / 8;
1136         if (offset & 7) {
1137             *data = extract64(s->baser[index], 32, 32);
1138         } else {
1139             *data = extract64(s->baser[index], 0, 32);
1140         }
1141         break;
1142     default:
1143         result = false;
1144         break;
1145     }
1146     return result;
1147 }
1148 
/*
 * Handle a 64-bit write to a register in the ITS control frame.
 * Returns true if the offset corresponds to an implemented register;
 * false makes the caller log a guest error and treat the access as
 * write-ignored.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* The read-only fields keep their reset-time values */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            /* Changing CBASER also resets the read pointer */
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* RETRY bit is write-ignored; a moved write pointer queues work */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1209 
1210 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1211                               uint64_t *data, MemTxAttrs attrs)
1212 {
1213     bool result = true;
1214     int index;
1215 
1216     switch (offset) {
1217     case GITS_TYPER:
1218         *data = s->typer;
1219         break;
1220     case GITS_BASER ... GITS_BASER + 0x3f:
1221         index = (offset - GITS_BASER) / 8;
1222         *data = s->baser[index];
1223         break;
1224     case GITS_CBASER:
1225         *data = s->cbaser;
1226         break;
1227     case GITS_CREADR:
1228         *data = s->creadr;
1229         break;
1230     case GITS_CWRITER:
1231         *data = s->cwriter;
1232         break;
1233     default:
1234         result = false;
1235         break;
1236     }
1237     return result;
1238 }
1239 
1240 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1241                                   unsigned size, MemTxAttrs attrs)
1242 {
1243     GICv3ITSState *s = (GICv3ITSState *)opaque;
1244     bool result;
1245 
1246     switch (size) {
1247     case 4:
1248         result = its_readl(s, offset, data, attrs);
1249         break;
1250     case 8:
1251         result = its_readll(s, offset, data, attrs);
1252         break;
1253     default:
1254         result = false;
1255         break;
1256     }
1257 
1258     if (!result) {
1259         qemu_log_mask(LOG_GUEST_ERROR,
1260                       "%s: invalid guest read at offset " TARGET_FMT_plx
1261                       "size %u\n", __func__, offset, size);
1262         trace_gicv3_its_badread(offset, size);
1263         /*
1264          * The spec requires that reserved registers are RAZ/WI;
1265          * so use false returns from leaf functions as a way to
1266          * trigger the guest-error logging but don't return it to
1267          * the caller, or we'll cause a spurious guest data abort.
1268          */
1269         *data = 0;
1270     } else {
1271         trace_gicv3_its_read(offset, *data, size);
1272     }
1273     return MEMTX_OK;
1274 }
1275 
1276 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1277                                    unsigned size, MemTxAttrs attrs)
1278 {
1279     GICv3ITSState *s = (GICv3ITSState *)opaque;
1280     bool result;
1281 
1282     switch (size) {
1283     case 4:
1284         result = its_writel(s, offset, data, attrs);
1285         break;
1286     case 8:
1287         result = its_writell(s, offset, data, attrs);
1288         break;
1289     default:
1290         result = false;
1291         break;
1292     }
1293 
1294     if (!result) {
1295         qemu_log_mask(LOG_GUEST_ERROR,
1296                       "%s: invalid guest write at offset " TARGET_FMT_plx
1297                       "size %u\n", __func__, offset, size);
1298         trace_gicv3_its_badwrite(offset, data, size);
1299         /*
1300          * The spec requires that reserved registers are RAZ/WI;
1301          * so use false returns from leaf functions as a way to
1302          * trigger the guest-error logging but don't return it to
1303          * the caller, or we'll cause a spurious guest data abort.
1304          */
1305     } else {
1306         trace_gicv3_its_write(offset, data, size);
1307     }
1308     return MEMTX_OK;
1309 }
1310 
/*
 * Control register frame: 4- and 8-byte accesses, dispatched through
 * gicv3_its_read/gicv3_its_write.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1320 
/*
 * Interrupt translation frame (GITS_TRANSLATER): 2- and 4-byte
 * accesses are permitted.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1330 
/*
 * Realize the emulated ITS: verify that every redistributor supports
 * physical LPIs, map the MMIO regions, and fill in the read-only
 * GITS_TYPER fields describing this implementation's capabilities.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    /* The ITS needs GICR_TYPER.PLPIS on every CPU's redistributor */
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    /* ITT_ENTRY_SIZE and CIDBITS-style fields are encoded as value-minus-1 */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}
1354 
/*
 * Device reset: after the common-class reset, restore the read-only
 * register fields (GITS_CTLR.Quiescent and the GITS_BASER<n> TYPE,
 * PAGESIZE and ENTRYSIZE fields) that identify the tables this
 * implementation provides.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    /* ENTRYSIZE is encoded as the entry size in bytes, minus one */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
1386 
1387 static void gicv3_its_post_load(GICv3ITSState *s)
1388 {
1389     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1390         extract_table_params(s);
1391         extract_cmdq_params(s);
1392     }
1393 }
1394 
/* "parent-gicv3" must be wired to the GICv3 this ITS is attached to */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1400 
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    /* Chain our reset after the common-class reset */
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    /* Rebuild cached table/queue parameters after migration */
    icc->post_load = gicv3_its_post_load;
}
1412 
/* QOM type description for the emulated (TCG) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1420 
/* Register the ITS QOM type at module init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1427