xref: /qemu/hw/intc/arm_gicv3_its.c (revision 50d84584d3c77e3a9104826a53a691318aeaf038)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent-class reset handler, saved so our reset can chain up to it */
    void (*parent_reset)(DeviceState *dev);
};
32 
/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,     /* ITS CLEAR command: deassert the LPI */
    DISCARD = 2,   /* ITS DISCARD command: deassert LPI and remove the ITE */
    INTERRUPT = 3, /* ITS INT command: assert the LPI */
} ItsCmdType;
43 
/* In-memory form of a Device Table entry, as unpacked by get_dte() */
typedef struct DTEntry {
    bool valid;
    unsigned size;    /* number of EventID bits minus one */
    uint64_t ittaddr; /* guest address of this device's ITT */
} DTEntry;

/* In-memory form of a Collection Table entry, as unpacked by get_cte() */
typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;  /* target redistributor; used as a CPU index here */
} CTEntry;

/* In-memory form of an Interrupt Translation Table entry */
typedef struct ITEntry {
    bool valid;
    int inttype;       /* ITE_INTTYPE_PHYSICAL or virtual */
    uint32_t intid;    /* the LPI INTID this event maps to */
    uint32_t doorbell; /* doorbell INTID (GICv4); INTID_SPURIOUS if none */
    uint32_t icid;     /* collection this interrupt is routed through */
    uint32_t vpeid;    /* target vPE (GICv4 only) */
} ITEntry;
63 
64 
/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall or keep going.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,    /* stop processing the command queue */
    CMD_CONTINUE = 1, /* advance past this command to the next one */
} ItsCmdResult;
81 
82 /* True if the ITS supports the GICv4 virtual LPI feature */
83 static bool its_feature_virtual(GICv3ITSState *s)
84 {
85     return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
86 }
87 
88 static inline bool intid_in_lpi_range(uint32_t id)
89 {
90     return id >= GICV3_LPI_INTID_START &&
91         id < (1 << (GICD_TYPER_IDBITS + 1));
92 }
93 
94 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
95 {
96     uint64_t result = 0;
97 
98     switch (page_sz) {
99     case GITS_PAGE_SIZE_4K:
100     case GITS_PAGE_SIZE_16K:
101         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
102         break;
103 
104     case GITS_PAGE_SIZE_64K:
105         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
106         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
107         break;
108 
109     default:
110         break;
111     }
112     return result;
113 }
114 
115 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
116                                  uint32_t idx, MemTxResult *res)
117 {
118     /*
119      * Given a TableDesc describing one of the ITS in-guest-memory
120      * tables and an index into it, return the guest address
121      * corresponding to that table entry.
122      * If there was a memory error reading the L1 table of an
123      * indirect table, *res is set accordingly, and we return -1.
124      * If the L1 table entry is marked not valid, we return -1 with
125      * *res set to MEMTX_OK.
126      *
127      * The specification defines the format of level 1 entries of a
128      * 2-level table, but the format of level 2 entries and the format
129      * of flat-mapped tables is IMPDEF.
130      */
131     AddressSpace *as = &s->gicv3->dma_as;
132     uint32_t l2idx;
133     uint64_t l2;
134     uint32_t num_l2_entries;
135 
136     *res = MEMTX_OK;
137 
138     if (!td->indirect) {
139         /* Single level table */
140         return td->base_addr + idx * td->entry_sz;
141     }
142 
143     /* Two level table */
144     l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
145 
146     l2 = address_space_ldq_le(as,
147                               td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
148                               MEMTXATTRS_UNSPECIFIED, res);
149     if (*res != MEMTX_OK) {
150         return -1;
151     }
152     if (!(l2 & L2_TABLE_VALID_MASK)) {
153         return -1;
154     }
155 
156     num_l2_entries = td->page_sz / td->entry_sz;
157     return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
158 }
159 
160 /*
161  * Read the Collection Table entry at index @icid. On success (including
162  * successfully determining that there is no valid CTE for this index),
163  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
164  * If there is an error reading memory then we return the error code.
165  */
166 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
167 {
168     AddressSpace *as = &s->gicv3->dma_as;
169     MemTxResult res = MEMTX_OK;
170     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
171     uint64_t cteval;
172 
173     if (entry_addr == -1) {
174         /* No L2 table entry, i.e. no valid CTE, or a memory error */
175         cte->valid = false;
176         goto out;
177     }
178 
179     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
180     if (res != MEMTX_OK) {
181         goto out;
182     }
183     cte->valid = FIELD_EX64(cteval, CTE, VALID);
184     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
185 out:
186     if (res != MEMTX_OK) {
187         trace_gicv3_its_cte_read_fault(icid);
188     } else {
189         trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
190     }
191     return res;
192 }
193 
/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
199 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
200                        const ITEntry *ite)
201 {
202     AddressSpace *as = &s->gicv3->dma_as;
203     MemTxResult res = MEMTX_OK;
204     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
205     uint64_t itel = 0;
206     uint32_t iteh = 0;
207 
208     trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
209                               ite->inttype, ite->intid, ite->icid,
210                               ite->vpeid, ite->doorbell);
211 
212     if (ite->valid) {
213         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
214         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
215         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
216         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
217         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
218         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
219     }
220 
221     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
222     if (res != MEMTX_OK) {
223         return false;
224     }
225     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
226     return res == MEMTX_OK;
227 }
228 
229 /*
230  * Read the Interrupt Table entry at index @eventid from the table specified
231  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
232  * struct @ite accordingly. If there is an error reading memory then we return
233  * the error code.
234  */
235 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
236                            const DTEntry *dte, ITEntry *ite)
237 {
238     AddressSpace *as = &s->gicv3->dma_as;
239     MemTxResult res = MEMTX_OK;
240     uint64_t itel;
241     uint32_t iteh;
242     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
243 
244     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
245     if (res != MEMTX_OK) {
246         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
247         return res;
248     }
249 
250     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
251     if (res != MEMTX_OK) {
252         trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
253         return res;
254     }
255 
256     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
257     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
258     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
259     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
260     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
261     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
262     trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
263                              ite->inttype, ite->intid, ite->icid,
264                              ite->vpeid, ite->doorbell);
265     return MEMTX_OK;
266 }
267 
268 /*
269  * Read the Device Table entry at index @devid. On success (including
270  * successfully determining that there is no valid DTE for this index),
271  * we return MEMTX_OK and populate the DTEntry struct accordingly.
272  * If there is an error reading memory then we return the error code.
273  */
274 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
275 {
276     MemTxResult res = MEMTX_OK;
277     AddressSpace *as = &s->gicv3->dma_as;
278     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
279     uint64_t dteval;
280 
281     if (entry_addr == -1) {
282         /* No L2 table entry, i.e. no valid DTE, or a memory error */
283         dte->valid = false;
284         goto out;
285     }
286     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
287     if (res != MEMTX_OK) {
288         goto out;
289     }
290     dte->valid = FIELD_EX64(dteval, DTE, VALID);
291     dte->size = FIELD_EX64(dteval, DTE, SIZE);
292     /* DTE word field stores bits [51:8] of the ITT address */
293     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
294 out:
295     if (res != MEMTX_OK) {
296         trace_gicv3_its_dte_read_fault(devid);
297     } else {
298         trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
299     }
300     return res;
301 }
302 
303 /*
304  * This function handles the processing of following commands based on
305  * the ItsCmdType parameter passed:-
306  * 1. triggering of lpi interrupt translation via ITS INT command
307  * 2. triggering of lpi interrupt translation via gits_translater register
308  * 3. handling of ITS CLEAR command
309  * 4. handling of ITS DISCARD command
310  */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry cte;
    ITEntry ite;

    /* Range-check the device ID before touching the device table */
    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    /* Memory errors stall the queue; bad parameters only skip the command */
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    /* dte.size encodes (number of eventid bits - 1) */
    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    /* Translate (devid, eventid) -> ITE -> CTE to find the target CPU */
    if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid CTE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    /*
     * Current implementation only supports rdbase == procnum
     * Hence rdbase physical address is ignored
     */
    if (cte.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    /* CLEAR/DISCARD deassert the LPI; INT (or a TRANSLATER write) asserts it */
    if ((cmd == CLEAR) || (cmd == DISCARD)) {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
    } else {
        gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
    }

    if (cmd == DISCARD) {
        ITEntry ite = {};
        /* remove mapping from interrupt translation table */
        ite.valid = false;
        return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
    }
    return CMD_CONTINUE;
}
395 
396 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
397                                     ItsCmdType cmd)
398 {
399     uint32_t devid, eventid;
400 
401     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
402     eventid = cmdpkt[1] & EVENTID_MASK;
403     switch (cmd) {
404     case INTERRUPT:
405         trace_gicv3_its_cmd_int(devid, eventid);
406         break;
407     case CLEAR:
408         trace_gicv3_its_cmd_clear(devid, eventid);
409         break;
410     case DISCARD:
411         trace_gicv3_its_cmd_discard(devid, eventid);
412         break;
413     default:
414         g_assert_not_reached();
415     }
416     return do_process_its_cmd(s, devid, eventid, cmd);
417 }
418 
419 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
420                                   bool ignore_pInt)
421 {
422     uint32_t devid, eventid;
423     uint32_t pIntid = 0;
424     uint64_t num_eventids;
425     uint16_t icid = 0;
426     DTEntry dte;
427     ITEntry ite;
428 
429     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
430     eventid = cmdpkt[1] & EVENTID_MASK;
431     icid = cmdpkt[2] & ICID_MASK;
432 
433     if (ignore_pInt) {
434         pIntid = eventid;
435         trace_gicv3_its_cmd_mapi(devid, eventid, icid);
436     } else {
437         pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
438         trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
439     }
440 
441     if (devid >= s->dt.num_entries) {
442         qemu_log_mask(LOG_GUEST_ERROR,
443                       "%s: invalid command attributes: devid %d>=%d",
444                       __func__, devid, s->dt.num_entries);
445         return CMD_CONTINUE;
446     }
447 
448     if (get_dte(s, devid, &dte) != MEMTX_OK) {
449         return CMD_STALL;
450     }
451     num_eventids = 1ULL << (dte.size + 1);
452 
453     if (icid >= s->ct.num_entries) {
454         qemu_log_mask(LOG_GUEST_ERROR,
455                       "%s: invalid ICID 0x%x >= 0x%x\n",
456                       __func__, icid, s->ct.num_entries);
457         return CMD_CONTINUE;
458     }
459 
460     if (!dte.valid) {
461         qemu_log_mask(LOG_GUEST_ERROR,
462                       "%s: no valid DTE for devid 0x%x\n", __func__, devid);
463         return CMD_CONTINUE;
464     }
465 
466     if (eventid >= num_eventids) {
467         qemu_log_mask(LOG_GUEST_ERROR,
468                       "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
469                       __func__, eventid, num_eventids);
470         return CMD_CONTINUE;
471     }
472 
473     if (!intid_in_lpi_range(pIntid)) {
474         qemu_log_mask(LOG_GUEST_ERROR,
475                       "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
476         return CMD_CONTINUE;
477     }
478 
479     /* add ite entry to interrupt translation table */
480     ite.valid = true;
481     ite.inttype = ITE_INTTYPE_PHYSICAL;
482     ite.intid = pIntid;
483     ite.icid = icid;
484     ite.doorbell = INTID_SPURIOUS;
485     ite.vpeid = 0;
486     return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
487 }
488 
489 /*
490  * Update the Collection Table entry for @icid to @cte. Returns true
491  * on success, false if there was a memory access error.
492  */
493 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
494 {
495     AddressSpace *as = &s->gicv3->dma_as;
496     uint64_t entry_addr;
497     uint64_t cteval = 0;
498     MemTxResult res = MEMTX_OK;
499 
500     trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
501 
502     if (cte->valid) {
503         /* add mapping entry to collection table */
504         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
505         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
506     }
507 
508     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
509     if (res != MEMTX_OK) {
510         /* memory access error: stall */
511         return false;
512     }
513     if (entry_addr == -1) {
514         /* No L2 table for this index: discard write and continue */
515         return true;
516     }
517 
518     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
519     return res == MEMTX_OK;
520 }
521 
522 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
523 {
524     uint16_t icid;
525     CTEntry cte;
526 
527     icid = cmdpkt[2] & ICID_MASK;
528     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
529     if (cte.valid) {
530         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
531         cte.rdbase &= RDBASE_PROCNUM_MASK;
532     } else {
533         cte.rdbase = 0;
534     }
535     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
536 
537     if (icid >= s->ct.num_entries) {
538         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
539         return CMD_CONTINUE;
540     }
541     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
542         qemu_log_mask(LOG_GUEST_ERROR,
543                       "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
544         return CMD_CONTINUE;
545     }
546 
547     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
548 }
549 
550 /*
551  * Update the Device Table entry for @devid to @dte. Returns true
552  * on success, false if there was a memory access error.
553  */
554 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
555 {
556     AddressSpace *as = &s->gicv3->dma_as;
557     uint64_t entry_addr;
558     uint64_t dteval = 0;
559     MemTxResult res = MEMTX_OK;
560 
561     trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
562 
563     if (dte->valid) {
564         /* add mapping entry to device table */
565         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
566         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
567         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
568     }
569 
570     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
571     if (res != MEMTX_OK) {
572         /* memory access error: stall */
573         return false;
574     }
575     if (entry_addr == -1) {
576         /* No L2 table for this index: discard write and continue */
577         return true;
578     }
579     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
580     return res == MEMTX_OK;
581 }
582 
583 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
584 {
585     uint32_t devid;
586     DTEntry dte;
587 
588     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
589     dte.size = cmdpkt[1] & SIZE_MASK;
590     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
591     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
592 
593     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
594 
595     if (devid >= s->dt.num_entries) {
596         qemu_log_mask(LOG_GUEST_ERROR,
597                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
598                       devid, s->dt.num_entries);
599         return CMD_CONTINUE;
600     }
601 
602     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
603         qemu_log_mask(LOG_GUEST_ERROR,
604                       "ITS MAPD: invalid size %d\n", dte.size);
605         return CMD_CONTINUE;
606     }
607 
608     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
609 }
610 
611 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
612 {
613     uint64_t rd1, rd2;
614 
615     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
616     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
617 
618     trace_gicv3_its_cmd_movall(rd1, rd2);
619 
620     if (rd1 >= s->gicv3->num_cpu) {
621         qemu_log_mask(LOG_GUEST_ERROR,
622                       "%s: RDBASE1 %" PRId64
623                       " out of range (must be less than %d)\n",
624                       __func__, rd1, s->gicv3->num_cpu);
625         return CMD_CONTINUE;
626     }
627     if (rd2 >= s->gicv3->num_cpu) {
628         qemu_log_mask(LOG_GUEST_ERROR,
629                       "%s: RDBASE2 %" PRId64
630                       " out of range (must be less than %d)\n",
631                       __func__, rd2, s->gicv3->num_cpu);
632         return CMD_CONTINUE;
633     }
634 
635     if (rd1 == rd2) {
636         /* Move to same target must succeed as a no-op */
637         return CMD_CONTINUE;
638     }
639 
640     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
641     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
642 
643     return CMD_CONTINUE;
644 }
645 
/*
 * Handle the MOVI command: re-route the LPI for (devid, eventid) from
 * its current collection to @new_icid, moving any pending state from
 * the old target redistributor to the new one.
 */
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    /* Memory errors stall the queue; bad parameters only skip the command */
    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    /* dte.size encodes (number of eventid bits - 1) */
    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (old_ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (new_icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* Look up both the old and the new collection's target redistributor */
    if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!old_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for old ICID 0x%x\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!new_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for new ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* rdbase values are used as CPU indexes, so they must be in range */
    if (old_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, old_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (new_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, new_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}
759 
760 /*
761  * Current implementation blocks until all
762  * commands are processed
763  */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    /* Nothing to do unless the ITS is enabled */
    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    /* Consume commands until the read pointer catches up with the writer */
    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        /*
         * Map the whole command packet from guest memory; a short or
         * failed mapping is a memory error, which stalls the queue.
         */
        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        /* Copy out the packet words (little-endian in guest memory) */
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, hence the internal state
             * is already consistent by the time SYNC command is executed.
             * Hence no further processing is required for SYNC command.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
        case GITS_CMD_INVALL:
            /*
             * Current implementation doesn't cache any ITS tables,
             * but the calculated lpi priority information. We only
             * need to trigger lpi priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             */
            trace_gicv3_its_cmd_inv();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        default:
            /* Unknown commands are ignored, per our CONSTRAINED UNPREDICTABLE choice */
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result == CMD_CONTINUE) {
            /* Advance the read pointer, wrapping at the end of the queue */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}
893 
894 /*
895  * This function extracts the ITS Device and Collection table specific
896  * parameters (like base_addr, size etc) from GITS_BASER register.
897  * It is called during ITS enable and also during post_load migration
898  */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    /* Walk all eight GITS_BASER<n> registers */
    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        /* An all-zero GITS_BASER<n> is an unimplemented register: skip it */
        if (!value) {
            continue;
        }

        /* Decode GITS_BASER<n>.PageSize into a byte count */
        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* Both encodings 2 and 3 are treated as 64K here */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            g_assert_not_reached();
        }

        /* GITS_BASER<n>.Size is the number of pages minus one */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        /*
         * Pick the in-state table descriptor this BASER register describes,
         * and work out how many ID bits bound the table's index space.
         */
        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        /* Start from a clean descriptor; a !Valid BASER leaves it zeroed */
        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: entries fill the whole allocation */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /*
             * Two-level table: the allocation holds level-1 descriptors,
             * each of which points at one page of level-2 entries.
             */
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        /* The ID width caps the usable table size regardless of allocation */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
1006 
1007 static void extract_cmdq_params(GICv3ITSState *s)
1008 {
1009     uint16_t num_pages = 0;
1010     uint64_t value = s->cbaser;
1011 
1012     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
1013 
1014     memset(&s->cq, 0 , sizeof(s->cq));
1015 
1016     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
1017         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
1018                              GITS_CMDQ_ENTRY_SIZE;
1019         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
1020         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
1021     }
1022 }
1023 
1024 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
1025                                               uint64_t *data, unsigned size,
1026                                               MemTxAttrs attrs)
1027 {
1028     /*
1029      * GITS_TRANSLATER is write-only, and all other addresses
1030      * in the interrupt translation space frame are RES0.
1031      */
1032     *data = 0;
1033     return MEMTX_OK;
1034 }
1035 
1036 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
1037                                                uint64_t data, unsigned size,
1038                                                MemTxAttrs attrs)
1039 {
1040     GICv3ITSState *s = (GICv3ITSState *)opaque;
1041     bool result = true;
1042 
1043     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
1044 
1045     switch (offset) {
1046     case GITS_TRANSLATER:
1047         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1048             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1049         }
1050         break;
1051     default:
1052         break;
1053     }
1054 
1055     if (result) {
1056         return MEMTX_OK;
1057     } else {
1058         return MEMTX_ERROR;
1059     }
1060 }
1061 
/*
 * Handle a 32-bit guest write to an ITS control frame register.
 * Returns true if the offset was recognised (including RO registers,
 * where the write is logged and ignored), false for reserved offsets
 * so the caller can log and complete the access as RAZ/WI.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /*
             * Enabling the ITS: latch the table and command-queue
             * parameters from the BASER/CBASER registers, then drain
             * any commands the guest queued before enabling.
             */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            /* Writing CBASER also resets the read pointer */
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* Retry bit is write-ignored; advancing CWRITER kicks the queue */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Merge the 32-bit write into the correct half, preserving
             * the read-only fields of the 64-bit register.
             */
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1167 
1168 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1169                              uint64_t *data, MemTxAttrs attrs)
1170 {
1171     bool result = true;
1172     int index;
1173 
1174     switch (offset) {
1175     case GITS_CTLR:
1176         *data = s->ctlr;
1177         break;
1178     case GITS_IIDR:
1179         *data = gicv3_iidr();
1180         break;
1181     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1182         /* ID registers */
1183         *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
1184         break;
1185     case GITS_TYPER:
1186         *data = extract64(s->typer, 0, 32);
1187         break;
1188     case GITS_TYPER + 4:
1189         *data = extract64(s->typer, 32, 32);
1190         break;
1191     case GITS_CBASER:
1192         *data = extract64(s->cbaser, 0, 32);
1193         break;
1194     case GITS_CBASER + 4:
1195         *data = extract64(s->cbaser, 32, 32);
1196         break;
1197     case GITS_CREADR:
1198         *data = extract64(s->creadr, 0, 32);
1199         break;
1200     case GITS_CREADR + 4:
1201         *data = extract64(s->creadr, 32, 32);
1202         break;
1203     case GITS_CWRITER:
1204         *data = extract64(s->cwriter, 0, 32);
1205         break;
1206     case GITS_CWRITER + 4:
1207         *data = extract64(s->cwriter, 32, 32);
1208         break;
1209     case GITS_BASER ... GITS_BASER + 0x3f:
1210         index = (offset - GITS_BASER) / 8;
1211         if (offset & 7) {
1212             *data = extract64(s->baser[index], 32, 32);
1213         } else {
1214             *data = extract64(s->baser[index], 0, 32);
1215         }
1216         break;
1217     default:
1218         result = false;
1219         break;
1220     }
1221     return result;
1222 }
1223 
/*
 * Handle a 64-bit guest write to an ITS control frame register.
 * Returns true if the offset was recognised (including RO registers,
 * where the write is logged and ignored), false for reserved offsets
 * so the caller can log and complete the access as RAZ/WI.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Preserve the read-only fields across the write */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            /* Writing CBASER also resets the read pointer */
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* Retry bit is write-ignored; advancing CWRITER kicks the queue */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1284 
1285 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1286                               uint64_t *data, MemTxAttrs attrs)
1287 {
1288     bool result = true;
1289     int index;
1290 
1291     switch (offset) {
1292     case GITS_TYPER:
1293         *data = s->typer;
1294         break;
1295     case GITS_BASER ... GITS_BASER + 0x3f:
1296         index = (offset - GITS_BASER) / 8;
1297         *data = s->baser[index];
1298         break;
1299     case GITS_CBASER:
1300         *data = s->cbaser;
1301         break;
1302     case GITS_CREADR:
1303         *data = s->creadr;
1304         break;
1305     case GITS_CWRITER:
1306         *data = s->cwriter;
1307         break;
1308     default:
1309         result = false;
1310         break;
1311     }
1312     return result;
1313 }
1314 
1315 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1316                                   unsigned size, MemTxAttrs attrs)
1317 {
1318     GICv3ITSState *s = (GICv3ITSState *)opaque;
1319     bool result;
1320 
1321     switch (size) {
1322     case 4:
1323         result = its_readl(s, offset, data, attrs);
1324         break;
1325     case 8:
1326         result = its_readll(s, offset, data, attrs);
1327         break;
1328     default:
1329         result = false;
1330         break;
1331     }
1332 
1333     if (!result) {
1334         qemu_log_mask(LOG_GUEST_ERROR,
1335                       "%s: invalid guest read at offset " TARGET_FMT_plx
1336                       " size %u\n", __func__, offset, size);
1337         trace_gicv3_its_badread(offset, size);
1338         /*
1339          * The spec requires that reserved registers are RAZ/WI;
1340          * so use false returns from leaf functions as a way to
1341          * trigger the guest-error logging but don't return it to
1342          * the caller, or we'll cause a spurious guest data abort.
1343          */
1344         *data = 0;
1345     } else {
1346         trace_gicv3_its_read(offset, *data, size);
1347     }
1348     return MEMTX_OK;
1349 }
1350 
1351 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1352                                    unsigned size, MemTxAttrs attrs)
1353 {
1354     GICv3ITSState *s = (GICv3ITSState *)opaque;
1355     bool result;
1356 
1357     switch (size) {
1358     case 4:
1359         result = its_writel(s, offset, data, attrs);
1360         break;
1361     case 8:
1362         result = its_writell(s, offset, data, attrs);
1363         break;
1364     default:
1365         result = false;
1366         break;
1367     }
1368 
1369     if (!result) {
1370         qemu_log_mask(LOG_GUEST_ERROR,
1371                       "%s: invalid guest write at offset " TARGET_FMT_plx
1372                       " size %u\n", __func__, offset, size);
1373         trace_gicv3_its_badwrite(offset, data, size);
1374         /*
1375          * The spec requires that reserved registers are RAZ/WI;
1376          * so use false returns from leaf functions as a way to
1377          * trigger the guest-error logging but don't return it to
1378          * the caller, or we'll cause a spurious guest data abort.
1379          */
1380     } else {
1381         trace_gicv3_its_write(offset, data, size);
1382     }
1383     return MEMTX_OK;
1384 }
1385 
/* MMIO ops for the ITS control register frame (GITS_CTLR etc) */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1395 
/* MMIO ops for the ITS translation register frame (GITS_TRANSLATER) */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1405 
1406 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1407 {
1408     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1409     int i;
1410 
1411     for (i = 0; i < s->gicv3->num_cpu; i++) {
1412         if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1413             error_setg(errp, "Physical LPI not supported by CPU %d", i);
1414             return;
1415         }
1416     }
1417 
1418     gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1419 
1420     /* set the ITS default features supported */
1421     s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1422     s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1423                           ITS_ITT_ENTRY_SIZE - 1);
1424     s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1425     s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1426     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1427     s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
1428 }
1429 
/*
 * Device reset: run the common-class reset, then set up the reset
 * values of GITS_CTLR and the GITS_BASER<n> read-only fields that
 * describe which tables this implementation provides.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    /* BASER2 describes the vPE table only when virtual LPIs are enabled */
    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
1471 
1472 static void gicv3_its_post_load(GICv3ITSState *s)
1473 {
1474     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1475         extract_table_params(s);
1476         extract_cmdq_params(s);
1477     }
1478 }
1479 
/* qdev properties: the ITS must be wired to its parent GICv3 device */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1485 
1486 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1487 {
1488     DeviceClass *dc = DEVICE_CLASS(klass);
1489     GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1490     GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1491 
1492     dc->realize = gicv3_arm_its_realize;
1493     device_class_set_props(dc, gicv3_its_props);
1494     device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1495     icc->post_load = gicv3_its_post_load;
1496 }
1497 
/* QOM type registration data for the emulated (non-KVM) GICv3 ITS */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1505 
/* Register the ITS QOM type with the type system at startup */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1512