xref: /qemu/hw/intc/arm_gicv3_its.c (revision e40509801d4f0b17fac641a07696ecb41421b3dc)
1 /*
2  * ITS emulation for a GICv3-based system
3  *
4  * Copyright Linaro.org 2021
5  *
6  * Authors:
7  *  Shashi Mallela <shashi.mallela@linaro.org>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10  * option) any later version.  See the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qemu/log.h"
16 #include "trace.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 
typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    /* Parent class reset handler, saved so our reset can chain to it */
    void (*parent_reset)(DeviceState *dev);
};
32 
33 /*
34  * This is an internal enum used to distinguish between LPI triggered
35  * via command queue and LPI triggered via gits_translater write.
36  */
37 typedef enum ItsCmdType {
38     NONE = 0, /* internal indication for GITS_TRANSLATER write */
39     CLEAR = 1,
40     DISCARD = 2,
41     INTERRUPT = 3,
42 } ItsCmdType;
43 
/* In-memory representation of a decoded Device Table Entry */
typedef struct DTEntry {
    bool valid;       /* DTE valid bit */
    unsigned size;    /* number of EventID bits supported, minus one */
    uint64_t ittaddr; /* guest physical address of the ITT for this device */
} DTEntry;
49 
/* In-memory representation of a decoded Collection Table Entry */
typedef struct CTEntry {
    bool valid;      /* CTE valid bit */
    uint32_t rdbase; /* target redistributor; here always a CPU number */
} CTEntry;
54 
/* In-memory representation of a decoded Interrupt Translation Entry */
typedef struct ITEntry {
    bool valid;        /* ITE valid bit */
    int inttype;       /* interrupt type (e.g. ITE_INTTYPE_PHYSICAL) */
    uint32_t intid;    /* the LPI interrupt number */
    uint32_t doorbell; /* doorbell interrupt (INTID_SPURIOUS if none) */
    uint32_t icid;     /* collection ID this event maps to */
    uint32_t vpeid;    /* virtual PE ID (unused for physical interrupts) */
} ITEntry;
63 
64 
65 /*
66  * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
67  * if a command parameter is not correct. These include both "stall
68  * processing of the command queue" and "ignore this command, and
69  * keep processing the queue". In our implementation we choose that
70  * memory transaction errors reading the command packet provoke a
71  * stall, but errors in parameters cause us to ignore the command
72  * and continue processing.
73  * The process_* functions which handle individual ITS commands all
74  * return an ItsCmdResult which tells process_cmdq() whether it should
75  * stall or keep going.
76  */
77 typedef enum ItsCmdResult {
78     CMD_STALL = 0,
79     CMD_CONTINUE = 1,
80 } ItsCmdResult;
81 
82 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
83 {
84     uint64_t result = 0;
85 
86     switch (page_sz) {
87     case GITS_PAGE_SIZE_4K:
88     case GITS_PAGE_SIZE_16K:
89         result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
90         break;
91 
92     case GITS_PAGE_SIZE_64K:
93         result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
94         result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
95         break;
96 
97     default:
98         break;
99     }
100     return result;
101 }
102 
static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    /* Each L1 entry covers one page's worth of L1TABLE_ENTRY_SIZE slots */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);

    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        /* L1 entry not valid: no L2 page allocated for this index */
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    /* Keep bits [50:0] of the L1 entry (the L2 page base) and index in */
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
147 
148 /*
149  * Read the Collection Table entry at index @icid. On success (including
150  * successfully determining that there is no valid CTE for this index),
151  * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
152  * If there is an error reading memory then we return the error code.
153  */
154 static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
155 {
156     AddressSpace *as = &s->gicv3->dma_as;
157     MemTxResult res = MEMTX_OK;
158     uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
159     uint64_t cteval;
160 
161     if (entry_addr == -1) {
162         /* No L2 table entry, i.e. no valid CTE, or a memory error */
163         cte->valid = false;
164         return res;
165     }
166 
167     cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
168     if (res != MEMTX_OK) {
169         return res;
170     }
171     cte->valid = FIELD_EX64(cteval, CTE, VALID);
172     cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
173     return MEMTX_OK;
174 }
175 
176 /*
177  * Update the Interrupt Table entry at index @evinted in the table specified
178  * by the dte @dte. Returns true on success, false if there was a memory
179  * access error.
180  */
181 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
182                        const ITEntry *ite)
183 {
184     AddressSpace *as = &s->gicv3->dma_as;
185     MemTxResult res = MEMTX_OK;
186     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
187     uint64_t itel = 0;
188     uint32_t iteh = 0;
189 
190     if (ite->valid) {
191         itel = FIELD_DP64(itel, ITE_L, VALID, 1);
192         itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
193         itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
194         itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
195         itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
196         iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
197     }
198 
199     address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
200     if (res != MEMTX_OK) {
201         return false;
202     }
203     address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
204     return res == MEMTX_OK;
205 }
206 
207 /*
208  * Read the Interrupt Table entry at index @eventid from the table specified
209  * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
210  * struct @ite accordingly. If there is an error reading memory then we return
211  * the error code.
212  */
213 static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
214                            const DTEntry *dte, ITEntry *ite)
215 {
216     AddressSpace *as = &s->gicv3->dma_as;
217     MemTxResult res = MEMTX_OK;
218     uint64_t itel;
219     uint32_t iteh;
220     hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
221 
222     itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
223     if (res != MEMTX_OK) {
224         return res;
225     }
226 
227     iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
228     if (res != MEMTX_OK) {
229         return res;
230     }
231 
232     ite->valid = FIELD_EX64(itel, ITE_L, VALID);
233     ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
234     ite->intid = FIELD_EX64(itel, ITE_L, INTID);
235     ite->icid = FIELD_EX64(itel, ITE_L, ICID);
236     ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
237     ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
238     return MEMTX_OK;
239 }
240 
241 /*
242  * Read the Device Table entry at index @devid. On success (including
243  * successfully determining that there is no valid DTE for this index),
244  * we return MEMTX_OK and populate the DTEntry struct accordingly.
245  * If there is an error reading memory then we return the error code.
246  */
247 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
248 {
249     MemTxResult res = MEMTX_OK;
250     AddressSpace *as = &s->gicv3->dma_as;
251     uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
252     uint64_t dteval;
253 
254     if (entry_addr == -1) {
255         /* No L2 table entry, i.e. no valid DTE, or a memory error */
256         dte->valid = false;
257         return res;
258     }
259     dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
260     if (res != MEMTX_OK) {
261         return res;
262     }
263     dte->valid = FIELD_EX64(dteval, DTE, VALID);
264     dte->size = FIELD_EX64(dteval, DTE, SIZE);
265     /* DTE word field stores bits [51:8] of the ITT address */
266     dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
267     return MEMTX_OK;
268 }
269 
270 /*
271  * This function handles the processing of following commands based on
272  * the ItsCmdType parameter passed:-
273  * 1. triggering of lpi interrupt translation via ITS INT command
274  * 2. triggering of lpi interrupt translation via gits_translater register
275  * 3. handling of ITS CLEAR command
276  * 4. handling of ITS DISCARD command
277  */
278 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
279                                        uint32_t eventid, ItsCmdType cmd)
280 {
281     uint64_t num_eventids;
282     DTEntry dte;
283     CTEntry cte;
284     ITEntry ite;
285 
286     if (devid >= s->dt.num_entries) {
287         qemu_log_mask(LOG_GUEST_ERROR,
288                       "%s: invalid command attributes: devid %d>=%d",
289                       __func__, devid, s->dt.num_entries);
290         return CMD_CONTINUE;
291     }
292 
293     if (get_dte(s, devid, &dte) != MEMTX_OK) {
294         return CMD_STALL;
295     }
296     if (!dte.valid) {
297         qemu_log_mask(LOG_GUEST_ERROR,
298                       "%s: invalid command attributes: "
299                       "invalid dte for %d\n", __func__, devid);
300         return CMD_CONTINUE;
301     }
302 
303     num_eventids = 1ULL << (dte.size + 1);
304     if (eventid >= num_eventids) {
305         qemu_log_mask(LOG_GUEST_ERROR,
306                       "%s: invalid command attributes: eventid %d >= %"
307                       PRId64 "\n",
308                       __func__, eventid, num_eventids);
309         return CMD_CONTINUE;
310     }
311 
312     if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
313         return CMD_STALL;
314     }
315 
316     if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
317         qemu_log_mask(LOG_GUEST_ERROR,
318                       "%s: invalid command attributes: invalid ITE\n",
319                       __func__);
320         return CMD_CONTINUE;
321     }
322 
323     if (ite.icid >= s->ct.num_entries) {
324         qemu_log_mask(LOG_GUEST_ERROR,
325                       "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
326                       __func__, ite.icid);
327         return CMD_CONTINUE;
328     }
329 
330     if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
331         return CMD_STALL;
332     }
333     if (!cte.valid) {
334         qemu_log_mask(LOG_GUEST_ERROR,
335                       "%s: invalid command attributes: invalid CTE\n",
336                       __func__);
337         return CMD_CONTINUE;
338     }
339 
340     /*
341      * Current implementation only supports rdbase == procnum
342      * Hence rdbase physical address is ignored
343      */
344     if (cte.rdbase >= s->gicv3->num_cpu) {
345         return CMD_CONTINUE;
346     }
347 
348     if ((cmd == CLEAR) || (cmd == DISCARD)) {
349         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
350     } else {
351         gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
352     }
353 
354     if (cmd == DISCARD) {
355         ITEntry ite = {};
356         /* remove mapping from interrupt translation table */
357         ite.valid = false;
358         return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
359     }
360     return CMD_CONTINUE;
361 }
362 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
363                                     ItsCmdType cmd)
364 {
365     uint32_t devid, eventid;
366 
367     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
368     eventid = cmdpkt[1] & EVENTID_MASK;
369     switch (cmd) {
370     case INTERRUPT:
371         trace_gicv3_its_cmd_int(devid, eventid);
372         break;
373     case CLEAR:
374         trace_gicv3_its_cmd_clear(devid, eventid);
375         break;
376     case DISCARD:
377         trace_gicv3_its_cmd_discard(devid, eventid);
378         break;
379     default:
380         g_assert_not_reached();
381     }
382     return do_process_its_cmd(s, devid, eventid, cmd);
383 }
384 
/*
 * Process a MAPTI or MAPI command: map (devid, eventid) to a physical
 * interrupt ID and a collection. For MAPI (@ignore_pInt true) the
 * physical interrupt ID is the event ID itself.
 */
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint32_t num_intids;
    uint16_t icid = 0;
    DTEntry dte;
    ITEntry ite;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        /* MAPI: pINTID field is absent; event ID doubles as the LPI number */
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    /* dte.size is the number of EventID bits minus one */
    num_eventids = 1ULL << (dte.size + 1);
    num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    /* The physical interrupt ID must be in the LPI range */
    if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
}
456 
457 /*
458  * Update the Collection Table entry for @icid to @cte. Returns true
459  * on success, false if there was a memory access error.
460  */
461 static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
462 {
463     AddressSpace *as = &s->gicv3->dma_as;
464     uint64_t entry_addr;
465     uint64_t cteval = 0;
466     MemTxResult res = MEMTX_OK;
467 
468     if (cte->valid) {
469         /* add mapping entry to collection table */
470         cteval = FIELD_DP64(cteval, CTE, VALID, 1);
471         cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
472     }
473 
474     entry_addr = table_entry_addr(s, &s->ct, icid, &res);
475     if (res != MEMTX_OK) {
476         /* memory access error: stall */
477         return false;
478     }
479     if (entry_addr == -1) {
480         /* No L2 table for this index: discard write and continue */
481         return true;
482     }
483 
484     address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
485     return res == MEMTX_OK;
486 }
487 
488 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
489 {
490     uint16_t icid;
491     CTEntry cte;
492 
493     icid = cmdpkt[2] & ICID_MASK;
494     cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
495     if (cte.valid) {
496         cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
497         cte.rdbase &= RDBASE_PROCNUM_MASK;
498     } else {
499         cte.rdbase = 0;
500     }
501     trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
502 
503     if (icid >= s->ct.num_entries) {
504         qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%d", icid);
505         return CMD_CONTINUE;
506     }
507     if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
508         qemu_log_mask(LOG_GUEST_ERROR,
509                       "ITS MAPC: invalid RDBASE %u ", cte.rdbase);
510         return CMD_CONTINUE;
511     }
512 
513     return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
514 }
515 
516 /*
517  * Update the Device Table entry for @devid to @dte. Returns true
518  * on success, false if there was a memory access error.
519  */
520 static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
521 {
522     AddressSpace *as = &s->gicv3->dma_as;
523     uint64_t entry_addr;
524     uint64_t dteval = 0;
525     MemTxResult res = MEMTX_OK;
526 
527     if (dte->valid) {
528         /* add mapping entry to device table */
529         dteval = FIELD_DP64(dteval, DTE, VALID, 1);
530         dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
531         dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
532     }
533 
534     entry_addr = table_entry_addr(s, &s->dt, devid, &res);
535     if (res != MEMTX_OK) {
536         /* memory access error: stall */
537         return false;
538     }
539     if (entry_addr == -1) {
540         /* No L2 table for this index: discard write and continue */
541         return true;
542     }
543     address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
544     return res == MEMTX_OK;
545 }
546 
547 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
548 {
549     uint32_t devid;
550     DTEntry dte;
551 
552     devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
553     dte.size = cmdpkt[1] & SIZE_MASK;
554     dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
555     dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
556 
557     trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
558 
559     if (devid >= s->dt.num_entries) {
560         qemu_log_mask(LOG_GUEST_ERROR,
561                       "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
562                       devid, s->dt.num_entries);
563         return CMD_CONTINUE;
564     }
565 
566     if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
567         qemu_log_mask(LOG_GUEST_ERROR,
568                       "ITS MAPD: invalid size %d\n", dte.size);
569         return CMD_CONTINUE;
570     }
571 
572     return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
573 }
574 
575 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
576 {
577     uint64_t rd1, rd2;
578 
579     rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
580     rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
581 
582     trace_gicv3_its_cmd_movall(rd1, rd2);
583 
584     if (rd1 >= s->gicv3->num_cpu) {
585         qemu_log_mask(LOG_GUEST_ERROR,
586                       "%s: RDBASE1 %" PRId64
587                       " out of range (must be less than %d)\n",
588                       __func__, rd1, s->gicv3->num_cpu);
589         return CMD_CONTINUE;
590     }
591     if (rd2 >= s->gicv3->num_cpu) {
592         qemu_log_mask(LOG_GUEST_ERROR,
593                       "%s: RDBASE2 %" PRId64
594                       " out of range (must be less than %d)\n",
595                       __func__, rd2, s->gicv3->num_cpu);
596         return CMD_CONTINUE;
597     }
598 
599     if (rd1 == rd2) {
600         /* Move to same target must succeed as a no-op */
601         return CMD_CONTINUE;
602     }
603 
604     /* Move all pending LPIs from redistributor 1 to redistributor 2 */
605     gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
606 
607     return CMD_CONTINUE;
608 }
609 
/*
 * Process a MOVI command: re-map the event (devid, eventid) from its
 * current collection to @new_icid, moving any pending state of the LPI
 * from the old target redistributor to the new one.
 */
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    uint64_t num_eventids;
    DTEntry dte;
    CTEntry old_cte, new_cte;
    ITEntry old_ite;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", __func__, devid);
        return CMD_CONTINUE;
    }

    /* dte.size is the number of EventID bits minus one */
    num_eventids = 1ULL << (dte.size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    if (old_ite.icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (new_icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    /* Both the old and the new collection must be mapped and in range */
    if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!old_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for old ICID 0x%x\n",
                      __func__, old_ite.icid);
        return CMD_CONTINUE;
    }

    if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!new_cte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid CTE for new ICID 0x%x\n",
                      __func__, new_icid);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, old_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (new_cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: CTE has invalid rdbase 0x%x\n",
                      __func__, new_cte.rdbase);
        return CMD_CONTINUE;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}
723 
724 /*
725  * Current implementation blocks until all
726  * commands are processed
727  */
728 static void process_cmdq(GICv3ITSState *s)
729 {
730     uint32_t wr_offset = 0;
731     uint32_t rd_offset = 0;
732     uint32_t cq_offset = 0;
733     AddressSpace *as = &s->gicv3->dma_as;
734     uint8_t cmd;
735     int i;
736 
737     if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
738         return;
739     }
740 
741     wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
742 
743     if (wr_offset >= s->cq.num_entries) {
744         qemu_log_mask(LOG_GUEST_ERROR,
745                       "%s: invalid write offset "
746                       "%d\n", __func__, wr_offset);
747         return;
748     }
749 
750     rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
751 
752     if (rd_offset >= s->cq.num_entries) {
753         qemu_log_mask(LOG_GUEST_ERROR,
754                       "%s: invalid read offset "
755                       "%d\n", __func__, rd_offset);
756         return;
757     }
758 
759     while (wr_offset != rd_offset) {
760         ItsCmdResult result = CMD_CONTINUE;
761         void *hostmem;
762         hwaddr buflen;
763         uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
764 
765         cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
766 
767         buflen = GITS_CMDQ_ENTRY_SIZE;
768         hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
769                                     &buflen, false, MEMTXATTRS_UNSPECIFIED);
770         if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
771             if (hostmem) {
772                 address_space_unmap(as, hostmem, buflen, false, 0);
773             }
774             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
775             qemu_log_mask(LOG_GUEST_ERROR,
776                           "%s: could not read command at 0x%" PRIx64 "\n",
777                           __func__, s->cq.base_addr + cq_offset);
778             break;
779         }
780         for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
781             cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
782         }
783         address_space_unmap(as, hostmem, buflen, false, 0);
784 
785         cmd = cmdpkt[0] & CMD_MASK;
786 
787         trace_gicv3_its_process_command(rd_offset, cmd);
788 
789         switch (cmd) {
790         case GITS_CMD_INT:
791             result = process_its_cmd(s, cmdpkt, INTERRUPT);
792             break;
793         case GITS_CMD_CLEAR:
794             result = process_its_cmd(s, cmdpkt, CLEAR);
795             break;
796         case GITS_CMD_SYNC:
797             /*
798              * Current implementation makes a blocking synchronous call
799              * for every command issued earlier, hence the internal state
800              * is already consistent by the time SYNC command is executed.
801              * Hence no further processing is required for SYNC command.
802              */
803             trace_gicv3_its_cmd_sync();
804             break;
805         case GITS_CMD_MAPD:
806             result = process_mapd(s, cmdpkt);
807             break;
808         case GITS_CMD_MAPC:
809             result = process_mapc(s, cmdpkt);
810             break;
811         case GITS_CMD_MAPTI:
812             result = process_mapti(s, cmdpkt, false);
813             break;
814         case GITS_CMD_MAPI:
815             result = process_mapti(s, cmdpkt, true);
816             break;
817         case GITS_CMD_DISCARD:
818             result = process_its_cmd(s, cmdpkt, DISCARD);
819             break;
820         case GITS_CMD_INV:
821         case GITS_CMD_INVALL:
822             /*
823              * Current implementation doesn't cache any ITS tables,
824              * but the calculated lpi priority information. We only
825              * need to trigger lpi priority re-calculation to be in
826              * sync with LPI config table or pending table changes.
827              */
828             trace_gicv3_its_cmd_inv();
829             for (i = 0; i < s->gicv3->num_cpu; i++) {
830                 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
831             }
832             break;
833         case GITS_CMD_MOVI:
834             result = process_movi(s, cmdpkt);
835             break;
836         case GITS_CMD_MOVALL:
837             result = process_movall(s, cmdpkt);
838             break;
839         default:
840             trace_gicv3_its_cmd_unknown(cmd);
841             break;
842         }
843         if (result == CMD_CONTINUE) {
844             rd_offset++;
845             rd_offset %= s->cq.num_entries;
846             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
847         } else {
848             /* CMD_STALL */
849             s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
850             qemu_log_mask(LOG_GUEST_ERROR,
851                           "%s: 0x%x cmd processing failed, stalling\n",
852                           __func__, cmd);
853             break;
854         }
855     }
856 }
857 
858 /*
859  * This function extracts the ITS Device and Collection table specific
860  * parameters (like base_addr, size etc) from GITS_BASER register.
861  * It is called during ITS enable and also during post_load migration
862  */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t  page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    /* Walk all eight GITS_BASER<n> registers */
    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            /* Unimplemented GITS_BASER<n>: nothing to extract */
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;

        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;

        case 2:
        case 3:
            /* encodings 2 and 3 are both treated as 64K pages */
            page_sz = GITS_PAGE_SIZE_64K;
            break;

        default:
            /* PAGESIZE is a 2-bit field, so no other value is possible */
            g_assert_not_reached();
        }

        /* SIZE holds (number of pages - 1) */
        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }
        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        /* ENTRYSIZE holds (entry size in bytes - 1) */
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            /* Flat table: all allocated pages hold entries directly */
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            /*
             * Two-level table: the allocated pages form a level-1 table
             * of pointers, each pointing at one page of entries.
             */
            td->num_entries = (((num_pages * page_sz) /
                                  L1TABLE_ENTRY_SIZE) *
                                 (page_sz / td->entry_sz));
        }
        /* The ID field width also bounds the number of usable entries */
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}
961 
962 static void extract_cmdq_params(GICv3ITSState *s)
963 {
964     uint16_t num_pages = 0;
965     uint64_t value = s->cbaser;
966 
967     num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
968 
969     memset(&s->cq, 0 , sizeof(s->cq));
970 
971     if (FIELD_EX64(value, GITS_CBASER, VALID)) {
972         s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
973                              GITS_CMDQ_ENTRY_SIZE;
974         s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
975         s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
976     }
977 }
978 
979 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
980                                               uint64_t *data, unsigned size,
981                                               MemTxAttrs attrs)
982 {
983     /*
984      * GITS_TRANSLATER is write-only, and all other addresses
985      * in the interrupt translation space frame are RES0.
986      */
987     *data = 0;
988     return MEMTX_OK;
989 }
990 
991 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
992                                                uint64_t data, unsigned size,
993                                                MemTxAttrs attrs)
994 {
995     GICv3ITSState *s = (GICv3ITSState *)opaque;
996     bool result = true;
997 
998     trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
999 
1000     switch (offset) {
1001     case GITS_TRANSLATER:
1002         if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1003             result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
1004         }
1005         break;
1006     default:
1007         break;
1008     }
1009 
1010     if (result) {
1011         return MEMTX_OK;
1012     } else {
1013         return MEMTX_ERROR;
1014     }
1015 }
1016 
/*
 * Handle a 32-bit write to the ITS control frame.
 * Returns true if the offset corresponds to an implemented register
 * (even if the write was ignored because the register is RO), false
 * for reserved/unknown offsets so the caller can log a guest error.
 */
static bool its_writel(GICv3ITSState *s, hwaddr offset,
                              uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            /*
             * Enabling the ITS: cache the table and command queue
             * parameters from BASER/CBASER, then drain any commands
             * the guest already queued.
             */
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            /* Writing CBASER also resets the command read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The RETRY bit is write-ignored */
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        /* Advancing the write pointer may expose new commands to process */
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set; otherwise RO */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            if (offset & 7) {
                /* Upper word: replace bits [63:32], keeping RO fields */
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                /* Lower word: replace bits [31:0], keeping RO fields */
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1122 
1123 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1124                              uint64_t *data, MemTxAttrs attrs)
1125 {
1126     bool result = true;
1127     int index;
1128 
1129     switch (offset) {
1130     case GITS_CTLR:
1131         *data = s->ctlr;
1132         break;
1133     case GITS_IIDR:
1134         *data = gicv3_iidr();
1135         break;
1136     case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1137         /* ID registers */
1138         *data = gicv3_idreg(offset - GITS_IDREGS);
1139         break;
1140     case GITS_TYPER:
1141         *data = extract64(s->typer, 0, 32);
1142         break;
1143     case GITS_TYPER + 4:
1144         *data = extract64(s->typer, 32, 32);
1145         break;
1146     case GITS_CBASER:
1147         *data = extract64(s->cbaser, 0, 32);
1148         break;
1149     case GITS_CBASER + 4:
1150         *data = extract64(s->cbaser, 32, 32);
1151         break;
1152     case GITS_CREADR:
1153         *data = extract64(s->creadr, 0, 32);
1154         break;
1155     case GITS_CREADR + 4:
1156         *data = extract64(s->creadr, 32, 32);
1157         break;
1158     case GITS_CWRITER:
1159         *data = extract64(s->cwriter, 0, 32);
1160         break;
1161     case GITS_CWRITER + 4:
1162         *data = extract64(s->cwriter, 32, 32);
1163         break;
1164     case GITS_BASER ... GITS_BASER + 0x3f:
1165         index = (offset - GITS_BASER) / 8;
1166         if (offset & 7) {
1167             *data = extract64(s->baser[index], 32, 32);
1168         } else {
1169             *data = extract64(s->baser[index], 0, 32);
1170         }
1171         break;
1172     default:
1173         result = false;
1174         break;
1175     }
1176     return result;
1177 }
1178 
/*
 * Handle a 64-bit write to the ITS control frame.
 * Returns true if the offset corresponds to an implemented register
 * (even if the write was ignored because the register is RO), false
 * for reserved/unknown offsets so the caller can log a guest error.
 */
static bool its_writell(GICv3ITSState *s, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            /* Keep the read-only fields, update everything else */
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
         *                 already enabled
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            /* Writing CBASER also resets the command read pointer */
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        /* The RETRY bit is write-ignored; new commands may be available */
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        /* CREADR is writable only when GICD_CTLR.DS is set; otherwise RO */
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          TARGET_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
1239 
1240 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1241                               uint64_t *data, MemTxAttrs attrs)
1242 {
1243     bool result = true;
1244     int index;
1245 
1246     switch (offset) {
1247     case GITS_TYPER:
1248         *data = s->typer;
1249         break;
1250     case GITS_BASER ... GITS_BASER + 0x3f:
1251         index = (offset - GITS_BASER) / 8;
1252         *data = s->baser[index];
1253         break;
1254     case GITS_CBASER:
1255         *data = s->cbaser;
1256         break;
1257     case GITS_CREADR:
1258         *data = s->creadr;
1259         break;
1260     case GITS_CWRITER:
1261         *data = s->cwriter;
1262         break;
1263     default:
1264         result = false;
1265         break;
1266     }
1267     return result;
1268 }
1269 
1270 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1271                                   unsigned size, MemTxAttrs attrs)
1272 {
1273     GICv3ITSState *s = (GICv3ITSState *)opaque;
1274     bool result;
1275 
1276     switch (size) {
1277     case 4:
1278         result = its_readl(s, offset, data, attrs);
1279         break;
1280     case 8:
1281         result = its_readll(s, offset, data, attrs);
1282         break;
1283     default:
1284         result = false;
1285         break;
1286     }
1287 
1288     if (!result) {
1289         qemu_log_mask(LOG_GUEST_ERROR,
1290                       "%s: invalid guest read at offset " TARGET_FMT_plx
1291                       "size %u\n", __func__, offset, size);
1292         trace_gicv3_its_badread(offset, size);
1293         /*
1294          * The spec requires that reserved registers are RAZ/WI;
1295          * so use false returns from leaf functions as a way to
1296          * trigger the guest-error logging but don't return it to
1297          * the caller, or we'll cause a spurious guest data abort.
1298          */
1299         *data = 0;
1300     } else {
1301         trace_gicv3_its_read(offset, *data, size);
1302     }
1303     return MEMTX_OK;
1304 }
1305 
1306 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1307                                    unsigned size, MemTxAttrs attrs)
1308 {
1309     GICv3ITSState *s = (GICv3ITSState *)opaque;
1310     bool result;
1311 
1312     switch (size) {
1313     case 4:
1314         result = its_writel(s, offset, data, attrs);
1315         break;
1316     case 8:
1317         result = its_writell(s, offset, data, attrs);
1318         break;
1319     default:
1320         result = false;
1321         break;
1322     }
1323 
1324     if (!result) {
1325         qemu_log_mask(LOG_GUEST_ERROR,
1326                       "%s: invalid guest write at offset " TARGET_FMT_plx
1327                       "size %u\n", __func__, offset, size);
1328         trace_gicv3_its_badwrite(offset, data, size);
1329         /*
1330          * The spec requires that reserved registers are RAZ/WI;
1331          * so use false returns from leaf functions as a way to
1332          * trigger the guest-error logging but don't return it to
1333          * the caller, or we'll cause a spurious guest data abort.
1334          */
1335     } else {
1336         trace_gicv3_its_write(offset, data, size);
1337     }
1338     return MEMTX_OK;
1339 }
1340 
/*
 * MMIO ops for the ITS control register frame: 32-bit and 64-bit
 * accesses are accepted; smaller accesses are rejected by .valid.
 */
static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1350 
/*
 * MMIO ops for the interrupt translation frame (GITS_TRANSLATER):
 * only 16-bit and 32-bit accesses are accepted.
 */
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1360 
/*
 * Realize the ITS device: check that every CPU's redistributor
 * advertises physical LPI support, map the control and translation
 * MMIO regions, and fill in the fixed feature fields of GITS_TYPER.
 */
static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;

    /* The ITS is unusable unless all redistributors support physical LPIs */
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* set the ITS default features supported */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    /* ITT_ENTRY_SIZE is encoded as (size in bytes - 1) */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          ITS_ITT_ENTRY_SIZE - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    /* CIL == 1: the CollectionID width is given by CIDBITS */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
}
1384 
/*
 * Device reset handler: chain to the parent (common-class) reset,
 * then apply this model's register reset values.
 */
static void gicv3_its_reset(DeviceState *dev)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    c->parent_reset(dev);

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * setting GITS_BASER0.Type = 0b001 (Device)
     *         GITS_BASER1.Type = 0b100 (Collection Table)
     *         GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
     *         GITS_BASER<0,1>.Page_Size = 64KB
     * and default translation table entry size to 16 bytes
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    /* ENTRYSIZE fields are encoded as (entry size in bytes - 1) */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);
}
1416 
1417 static void gicv3_its_post_load(GICv3ITSState *s)
1418 {
1419     if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1420         extract_table_params(s);
1421         extract_cmdq_params(s);
1422     }
1423 }
1424 
/* Link to the GICv3 this ITS belongs to; set by whoever creates the device */
static Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_END_OF_LIST(),
};
1430 
/* QOM class init: wire up realize, properties, reset and migration hooks */
static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    /* Save the parent's reset handler so gicv3_its_reset() can chain to it */
    device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
    icc->post_load = gicv3_its_post_load;
}
1442 
/* QOM type description: TCG ITS model, subclass of the common ITS base */
static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};
1450 
/* Register the ITS type with QOM at module-init time */
static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)
1457