/*
 * ARM SMMUv3 support - Internal API
 *
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HW_ARM_SMMU_V3_INTERNAL_H
#define HW_ARM_SMMU_V3_INTERNAL_H

#include "hw/arm/smmu-common.h"

/* MMIO Registers */

REG32(IDR0,                0x0)
    FIELD(IDR0, S1P,         1 , 1)
    FIELD(IDR0, TTF,         2 , 2)
    FIELD(IDR0, COHACC,      4 , 1)
    FIELD(IDR0, ASID16,      12, 1)
    FIELD(IDR0, TTENDIAN,    21, 2)
    FIELD(IDR0, STALL_MODEL, 24, 2)
    FIELD(IDR0, TERM_MODEL,  26, 1)
    FIELD(IDR0, STLEVEL,     27, 2)

REG32(IDR1,                0x4)
    FIELD(IDR1, SIDSIZE,      0 , 6)
    FIELD(IDR1, EVENTQS,      16, 5)
    FIELD(IDR1, CMDQS,        21, 5)

#define SMMU_IDR1_SIDSIZE 16
#define SMMU_CMDQS   19
#define SMMU_EVENTQS 19
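/*
 * Values this model advertises in IDR1 (an interpretation based on the
 * field definitions above): a 16-bit StreamID space and command/event
 * queues of at most 2^19 entries.
 */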

REG32(IDR2,                0x8)
REG32(IDR3,                0xc)
REG32(IDR4,                0x10)
REG32(IDR5,                0x14)
    FIELD(IDR5, OAS,         0, 3)
    FIELD(IDR5, GRAN4K,      4, 1)
    FIELD(IDR5, GRAN16K,     5, 1)
    FIELD(IDR5, GRAN64K,     6, 1)

#define SMMU_IDR5_OAS 4
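/* An OAS field value of 4 encodes a 44-bit output address size in IDR5. */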

REG32(IIDR,                0x1c)
REG32(CR0,                 0x20)
    FIELD(CR0, SMMU_ENABLE,   0, 1)
    FIELD(CR0, EVENTQEN,      2, 1)
    FIELD(CR0, CMDQEN,        3, 1)

#define SMMU_CR0_RESERVED 0xFFFFFC20
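/*
 * 0xFFFFFC20 covers bits [31:10] and bit 5 of CR0, i.e. the bits this
 * model treats as reserved; guest writes to them are presumably masked
 * out by the MMIO write handler in smmuv3.c.
 */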

REG32(CR0ACK,              0x24)
REG32(CR1,                 0x28)
REG32(CR2,                 0x2c)
REG32(STATUSR,             0x40)
REG32(IRQ_CTRL,            0x50)
    FIELD(IRQ_CTRL, GERROR_IRQEN,        0, 1)
    FIELD(IRQ_CTRL, PRI_IRQEN,           1, 1)
    FIELD(IRQ_CTRL, EVENTQ_IRQEN,        2, 1)

REG32(IRQ_CTRL_ACK,        0x54)
REG32(GERROR,              0x60)
    FIELD(GERROR, CMDQ_ERR,           0, 1)
    FIELD(GERROR, EVENTQ_ABT_ERR,     2, 1)
    FIELD(GERROR, PRIQ_ABT_ERR,       3, 1)
    FIELD(GERROR, MSI_CMDQ_ABT_ERR,   4, 1)
    FIELD(GERROR, MSI_EVENTQ_ABT_ERR, 5, 1)
    FIELD(GERROR, MSI_PRIQ_ABT_ERR,   6, 1)
    FIELD(GERROR, MSI_GERROR_ABT_ERR, 7, 1)
    FIELD(GERROR, MSI_SFM_ERR,        8, 1)

REG32(GERRORN,             0x64)

#define A_GERROR_IRQ_CFG0  0x68 /* 64b */
REG32(GERROR_IRQ_CFG1, 0x70)
REG32(GERROR_IRQ_CFG2, 0x74)

#define A_STRTAB_BASE      0x80 /* 64b */

#define SMMU_BASE_ADDR_MASK 0xffffffffffe0

REG32(STRTAB_BASE_CFG,     0x88)
    FIELD(STRTAB_BASE_CFG, FMT,      16, 2)
    FIELD(STRTAB_BASE_CFG, SPLIT,    6 , 5)
    FIELD(STRTAB_BASE_CFG, LOG2SIZE, 0 , 6)
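/*
 * STRTAB_BASE_CFG summary (per the SMMUv3 architecture): FMT selects a
 * linear (0b00) or two-level (0b01) stream table, LOG2SIZE is the number
 * of StreamID bits the table covers, and SPLIT is the number of low
 * StreamID bits resolved by a level-2 table.  For example, with
 * LOG2SIZE = 16 and SPLIT = 8, SID[15:8] indexes the level-1 descriptors
 * and SID[7:0] indexes entries within a level-2 table.
 */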

#define A_CMDQ_BASE        0x90 /* 64b */
REG32(CMDQ_PROD,           0x98)
REG32(CMDQ_CONS,           0x9c)
    FIELD(CMDQ_CONS, ERR, 24, 7)

#define A_EVENTQ_BASE      0xa0 /* 64b */
REG32(EVENTQ_PROD,         0xa8)
REG32(EVENTQ_CONS,         0xac)

#define A_EVENTQ_IRQ_CFG0  0xb0 /* 64b */
REG32(EVENTQ_IRQ_CFG1,     0xb8)
REG32(EVENTQ_IRQ_CFG2,     0xbc)

#define A_IDREGS           0xfd0

static inline int smmu_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, SMMU_ENABLE);
}

/* Command Queue Entry */
typedef struct Cmd {
    uint32_t word[4];
} Cmd;

/* Event Queue Entry */
typedef struct Evt {
    uint32_t word[8];
} Evt;
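/*
 * Each command is 16 bytes (four 32-bit words) and each event record is
 * 32 bytes (eight words), matching the SMMUv3 command and event queue
 * entry sizes; the CMD_* accessors below extract fields from Cmd words.
 */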

static inline uint32_t smmuv3_idreg(int regoffset)
{
    /*
     * Return the value of the Primecell/Corelink ID registers at the
     * specified offset from the first ID register.
     * These values indicate an ARM implementation of MMU600 p1
     */
    static const uint8_t smmuv3_ids[] = {
        0x04, 0, 0, 0, 0x84, 0xB4, 0xF0, 0x10, 0x0D, 0xF0, 0x05, 0xB1
    };
    return smmuv3_ids[regoffset / 4];
}
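/*
 * Usage sketch (the actual caller lives in smmuv3.c): an MMIO read in the
 * ID register window is expected to return one byte per 32-bit register,
 * e.g. val = smmuv3_idreg(offset - A_IDREGS).  The twelve bytes above
 * cover PIDR4-7, PIDR0-3 and CIDR0-3; 0x0D, 0xF0, 0x05, 0xB1 is the
 * standard PrimeCell component ID signature.
 */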

static inline bool smmuv3_eventq_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN);
}

static inline bool smmuv3_gerror_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN);
}

/* Queue Handling */

#define Q_BASE(q)          ((q)->base & SMMU_BASE_ADDR_MASK)
#define WRAP_MASK(q)       (1 << (q)->log2size)
#define INDEX_MASK(q)      (((1 << (q)->log2size)) - 1)
#define WRAP_INDEX_MASK(q) ((1 << ((q)->log2size + 1)) - 1)

#define Q_CONS(q) ((q)->cons & INDEX_MASK(q))
#define Q_PROD(q) ((q)->prod & INDEX_MASK(q))

#define Q_CONS_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_CONS(q))
#define Q_PROD_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_PROD(q))

#define Q_CONS_WRAP(q) (((q)->cons & WRAP_MASK(q)) >> (q)->log2size)
#define Q_PROD_WRAP(q) (((q)->prod & WRAP_MASK(q)) >> (q)->log2size)
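/*
 * Worked example of the index layout: PROD and CONS hold a log2size-bit
 * entry index plus a wrap bit immediately above it.  With log2size = 8
 * (a 256-entry queue), INDEX_MASK is 0xff, WRAP_MASK is 0x100 and
 * WRAP_INDEX_MASK is 0x1ff, so Q_CONS_ENTRY/Q_PROD_ENTRY address
 * base + entry_size * (index & 0xff) while the wrap bit only records how
 * many times the index has rolled over.
 */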

static inline bool smmuv3_q_full(SMMUQueue *q)
{
    return ((q->cons ^ q->prod) & WRAP_INDEX_MASK(q)) == WRAP_MASK(q);
}

static inline bool smmuv3_q_empty(SMMUQueue *q)
{
    return (q->cons & WRAP_INDEX_MASK(q)) == (q->prod & WRAP_INDEX_MASK(q));
}
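/*
 * The queue is empty when index and wrap bit are both equal, and full
 * when the indexes match but the wrap bits differ: the XOR above then
 * leaves exactly the wrap bit set.
 */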

static inline void queue_prod_incr(SMMUQueue *q)
{
    q->prod = (q->prod + 1) & WRAP_INDEX_MASK(q);
}

static inline void queue_cons_incr(SMMUQueue *q)
{
    /*
     * We have to use deposit for the CONS registers to preserve
     * the ERR field in the high bits.
     */
    q->cons = deposit32(q->cons, 0, q->log2size + 1, q->cons + 1);
}

static inline bool smmuv3_cmdq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, CMDQEN);
}

static inline bool smmuv3_eventq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, EVENTQEN);
}

static inline void smmu_write_cmdq_err(SMMUv3State *s, uint32_t err_type)
{
    s->cmdq.cons = FIELD_DP32(s->cmdq.cons, CMDQ_CONS, ERR, err_type);
}

void smmuv3_write_eventq(SMMUv3State *s, Evt *evt);

/* Commands */

typedef enum SMMUCommandType {
    SMMU_CMD_NONE            = 0x00,
    SMMU_CMD_PREFETCH_CONFIG,
    SMMU_CMD_PREFETCH_ADDR,
    SMMU_CMD_CFGI_STE,
    SMMU_CMD_CFGI_STE_RANGE,
    SMMU_CMD_CFGI_CD,
    SMMU_CMD_CFGI_CD_ALL,
    SMMU_CMD_CFGI_ALL,
    SMMU_CMD_TLBI_NH_ALL     = 0x10,
    SMMU_CMD_TLBI_NH_ASID,
    SMMU_CMD_TLBI_NH_VA,
    SMMU_CMD_TLBI_NH_VAA,
    SMMU_CMD_TLBI_EL3_ALL    = 0x18,
    SMMU_CMD_TLBI_EL3_VA     = 0x1a,
    SMMU_CMD_TLBI_EL2_ALL    = 0x20,
    SMMU_CMD_TLBI_EL2_ASID,
    SMMU_CMD_TLBI_EL2_VA,
    SMMU_CMD_TLBI_EL2_VAA,
    SMMU_CMD_TLBI_S12_VMALL  = 0x28,
    SMMU_CMD_TLBI_S2_IPA     = 0x2a,
    SMMU_CMD_TLBI_NSNH_ALL   = 0x30,
    SMMU_CMD_ATC_INV         = 0x40,
    SMMU_CMD_PRI_RESP,
    SMMU_CMD_RESUME          = 0x44,
    SMMU_CMD_STALL_TERM,
    SMMU_CMD_SYNC,
} SMMUCommandType;

static const char *cmd_stringify[] = {
    [SMMU_CMD_PREFETCH_CONFIG] = "SMMU_CMD_PREFETCH_CONFIG",
    [SMMU_CMD_PREFETCH_ADDR]   = "SMMU_CMD_PREFETCH_ADDR",
    [SMMU_CMD_CFGI_STE]        = "SMMU_CMD_CFGI_STE",
    [SMMU_CMD_CFGI_STE_RANGE]  = "SMMU_CMD_CFGI_STE_RANGE",
    [SMMU_CMD_CFGI_CD]         = "SMMU_CMD_CFGI_CD",
    [SMMU_CMD_CFGI_CD_ALL]     = "SMMU_CMD_CFGI_CD_ALL",
    [SMMU_CMD_CFGI_ALL]        = "SMMU_CMD_CFGI_ALL",
    [SMMU_CMD_TLBI_NH_ALL]     = "SMMU_CMD_TLBI_NH_ALL",
    [SMMU_CMD_TLBI_NH_ASID]    = "SMMU_CMD_TLBI_NH_ASID",
    [SMMU_CMD_TLBI_NH_VA]      = "SMMU_CMD_TLBI_NH_VA",
    [SMMU_CMD_TLBI_NH_VAA]     = "SMMU_CMD_TLBI_NH_VAA",
    [SMMU_CMD_TLBI_EL3_ALL]    = "SMMU_CMD_TLBI_EL3_ALL",
    [SMMU_CMD_TLBI_EL3_VA]     = "SMMU_CMD_TLBI_EL3_VA",
    [SMMU_CMD_TLBI_EL2_ALL]    = "SMMU_CMD_TLBI_EL2_ALL",
    [SMMU_CMD_TLBI_EL2_ASID]   = "SMMU_CMD_TLBI_EL2_ASID",
    [SMMU_CMD_TLBI_EL2_VA]     = "SMMU_CMD_TLBI_EL2_VA",
    [SMMU_CMD_TLBI_EL2_VAA]    = "SMMU_CMD_TLBI_EL2_VAA",
    [SMMU_CMD_TLBI_S12_VMALL]  = "SMMU_CMD_TLBI_S12_VMALL",
    [SMMU_CMD_TLBI_S2_IPA]     = "SMMU_CMD_TLBI_S2_IPA",
    [SMMU_CMD_TLBI_NSNH_ALL]   = "SMMU_CMD_TLBI_NSNH_ALL",
    [SMMU_CMD_ATC_INV]         = "SMMU_CMD_ATC_INV",
    [SMMU_CMD_PRI_RESP]        = "SMMU_CMD_PRI_RESP",
    [SMMU_CMD_RESUME]          = "SMMU_CMD_RESUME",
    [SMMU_CMD_STALL_TERM]      = "SMMU_CMD_STALL_TERM",
    [SMMU_CMD_SYNC]            = "SMMU_CMD_SYNC",
};

static inline const char *smmu_cmd_string(SMMUCommandType type)
{
    if (type > SMMU_CMD_NONE && type < ARRAY_SIZE(cmd_stringify)) {
        return cmd_stringify[type] ? cmd_stringify[type] : "UNKNOWN";
    } else {
        return "INVALID";
    }
}

/* CMDQ fields */

typedef enum {
    SMMU_CERROR_NONE = 0,
    SMMU_CERROR_ILL,
    SMMU_CERROR_ABT,
    SMMU_CERROR_ATC_INV_SYNC,
} SMMUCmdError;

enum { /* Command completion notification */
    CMD_SYNC_SIG_NONE,
    CMD_SYNC_SIG_IRQ,
    CMD_SYNC_SIG_SEV,
};

#define CMD_TYPE(x)         extract32((x)->word[0], 0 , 8)
#define CMD_SSEC(x)         extract32((x)->word[0], 10, 1)
#define CMD_SSV(x)          extract32((x)->word[0], 11, 1)
#define CMD_RESUME_AC(x)    extract32((x)->word[0], 12, 1)
#define CMD_RESUME_AB(x)    extract32((x)->word[0], 13, 1)
#define CMD_SYNC_CS(x)      extract32((x)->word[0], 12, 2)
#define CMD_SSID(x)         extract32((x)->word[0], 12, 20)
#define CMD_SID(x)          ((x)->word[1])
#define CMD_VMID(x)         extract32((x)->word[1], 0 , 16)
#define CMD_ASID(x)         extract32((x)->word[1], 16, 16)
#define CMD_RESUME_STAG(x)  extract32((x)->word[2], 0 , 16)
#define CMD_RESP(x)         extract32((x)->word[2], 11, 2)
#define CMD_LEAF(x)         extract32((x)->word[2], 0 , 1)
#define CMD_STE_RANGE(x)    extract32((x)->word[2], 0 , 5)
#define CMD_ADDR(x) ({                                       \
            uint64_t high = (uint64_t)(x)->word[3];          \
            uint64_t low = extract32((x)->word[2], 12, 20);  \
            uint64_t addr = high << 32 | (low << 12);        \
            addr;                                            \
        })
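/*
 * CMD_ADDR reassembles the 64-bit address carried by an address-bearing
 * command: word[3] holds bits [63:32] and bits [31:12] of word[2] hold
 * address bits [31:12].  For example, word[2] = 0x12345000 and
 * word[3] = 0xab yield 0xab12345000; bits [11:0] are not carried in the
 * command and read back as zero.
 */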

#define SMMU_FEATURE_2LVL_STE (1 << 0)
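/*
 * Internal feature flag, presumably set when the guest selects a
 * two-level stream table via STRTAB_BASE_CFG.FMT; the consumer is the
 * stream table walk code in smmuv3.c, not this header.
 */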

#endif