xref: /qemu/hw/arm/smmuv3.c (revision fae4be38b35dcfae48494c023454e8988c15b69a)
/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * the GERROR register in case of a GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

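/*
 * smmuv3_write_gerrorn - handle a guest write to the GERRORN register
 *
 * The guest acknowledges an error by toggling the corresponding GERRORN
 * bit back to the GERROR value. Toggling bits that are not pending is
 * tolerated and simply ignored.
 */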
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to errors that are not pending (CONSTRAINED
     * UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

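/*
 * Circular queue helpers: queue_read fetches the entry at the current
 * consumer index, queue_write stores an entry at the current producer
 * index and advances the producer on success.
 */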
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

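/*
 * smmuv3_write_eventq - record an event in the event queue
 *
 * The event is silently dropped if the event queue is disabled or full.
 */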
void smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    if (smmuv3_q_full(q)) {
        return;
    }

    if (queue_write(q, evt) != MEMTX_OK) {
        return;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
}

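/* Set the reset values of the ID registers and of the command/event queues */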
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

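/*
 * smmuv3_cmdq_consume - process commands from the command queue
 *
 * Commands are consumed until the queue is empty or an error occurs.
 * On error, CMDQ_ERR is reported through GERROR and consumption only
 * resumes once the guest has acknowledged the error via GERRORN.
 */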
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * Some commands depend on register values, typically CR0. If those
     * register values change while the command is being handled, the spec
     * says it is UNPREDICTABLE whether the command is interpreted under
     * the new or the old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because SYNC returns immediately
         * and does not check the completion of previous commands.
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

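/* Handle an aligned 64-bit write to one of the SMMUv3 registers */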
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

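/*
 * Handle a 32-bit register write. 64-bit registers may also be written
 * as two 32-bit halves: the low word at the register offset and the
 * high word at offset + 4.
 */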
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging CMDQ_ERR, SW signals that commands can be
         * processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

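/*
 * MMIO write dispatcher: fold page 1 accesses onto page 0 and route the
 * access to the 32-bit or 64-bit handler depending on its size.
 */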
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

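/* 64-bit register read handler; unimplemented registers read as zero (RAZ) */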
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

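/* Migration state for a single queue and for the whole SMMUv3 device */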
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)