xref: /qemu/hw/arm/smmuv3.c (revision bb981004eaf4bab2c8ae4feaaf6ead8be7275044)
/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

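/*
 * GERROR/GERRORN acknowledgement protocol: an error is "active" (pending)
 * while the corresponding bits of GERROR and GERRORN differ.
 * smmuv3_trigger_irq() above activates an error by toggling its GERROR bit;
 * the guest acknowledges it by toggling the matching GERRORN bit, handled
 * below. For example, starting from GERROR=GERRORN=0, raising CMDQ_ERR
 * toggles GERROR bit 0 (now pending); the guest then writes GERRORN with
 * bit 0 set and the two registers match again (no longer pending).
 */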
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

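/*
 * Circular queue accessors. The PROD and CONS registers hold a log2size-bit
 * entry index with a wrap bit just above it (see the helpers in
 * smmuv3-internal.h): the queue is empty when indices and wrap bits are both
 * equal, and full when the indices match but the wrap bits differ.
 * Q_CONS_ENTRY()/Q_PROD_ENTRY() turn the current index into the guest
 * physical address of the corresponding queue entry.
 */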
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

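/*
 * Push one event record to the event queue. MEMTX_ERROR is returned if the
 * queue is disabled or full, or if the DMA write of the entry fails;
 * smmuv3_record_event() below escalates any failure to a GERROR.
 */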
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

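/*
 * Build an event record from @info and push it to the event queue; on a
 * write failure the EVENTQ_ABT_ERR global error is raised instead. A
 * typical (hypothetical) caller would record a faulting stream ID roughly
 * as follows:
 *
 *     SMMUEventInfo info = {.type = SMMU_EVT_C_BAD_STREAMID, .sid = sid};
 *     smmuv3_record_event(s, &info);
 */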
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_OK:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
        EVT_SET_IND(&evt,  info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

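    /*
     * Bits [4:0] of CMDQ_BASE/EVENTQ_BASE hold LOG2SIZE, so the reset value
     * advertises the maximum queue sizes also exposed in IDR1 above.
     */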
    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

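/*
 * Drain the command queue: commands are fetched and consumed until the
 * queue is empty or an error occurs. On error, the CERROR code is written
 * into CMDQ.CONS (smmu_write_cmdq_err) and GERROR.CMDQ_ERR is raised; the
 * guest must acknowledge it through GERRORN before consumption resumes.
 * At this stage only CMD_SYNC has any effect; the configuration and TLB
 * invalidation commands are accepted but traced as unhandled.
 */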
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

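/*
 * MMIO register write handlers. 64-bit capable registers (e.g. STRTAB_BASE,
 * CMDQ_BASE, the IRQ_CFG0 doorbells) can be programmed either with a single
 * 64-bit access, handled here, or as two 32-bit halves, handled by the
 * "reg + 4" cases in smmu_writel() below.
 */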
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

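/*
 * The SMMUv3 programming interface spans two 64KB pages. This model folds
 * page 1 onto page 0 by clearing bit 16 of the offset (the CONSTRAINED
 * UNPREDICTABLE "exact alias" choice noted below) and then dispatches on
 * the access size; only 4- and 8-byte accesses are implemented.
 */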
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

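/*
 * MemoryRegionOps for the register file: .valid/.impl restrict guest
 * accesses to 4 or 8 bytes, matching the dispatchers above, which return
 * MEMTX_ERROR for any other size.
 */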
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

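/*
 * Realize: run the parent class realize first, then expose the 128KB
 * (2 x 64KB pages) MMIO register region and the output IRQ lines, and
 * record the IOMMU memory region type the common SMMU code instantiates
 * for translated devices.
 */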
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

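/*
 * Migration state: both queues and the guest-programmable registers are
 * migrated. The read-only ID registers are not: they are constant for a
 * given machine and re-initialized by smmuv3_init_regs() on reset.
 */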
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)