/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 */

#ifndef _ARM_SMMU_V3_H
#define _ARM_SMMU_V3_H

#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/sizes.h>

struct arm_smmu_device;
struct arm_vsmmu;

/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL GENMASK(28, 27)
#define IDR0_ST_LVL_2LVL 1
#define IDR0_STALL_MODEL GENMASK(25, 24)
#define IDR0_STALL_MODEL_STALL 0
#define IDR0_STALL_MODEL_FORCE 2
#define IDR0_TTENDIAN GENMASK(22, 21)
#define IDR0_TTENDIAN_MIXED 0
#define IDR0_TTENDIAN_LE 2
#define IDR0_TTENDIAN_BE 3
#define IDR0_CD2L (1 << 19)
#define IDR0_VMID16 (1 << 18)
#define IDR0_PRI (1 << 16)
#define IDR0_SEV (1 << 14)
#define IDR0_MSI (1 << 13)
#define IDR0_ASID16 (1 << 12)
#define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9)
#define IDR0_HTTU GENMASK(7, 6)
#define IDR0_HTTU_ACCESS 1
#define IDR0_HTTU_ACCESS_DIRTY 2
#define IDR0_COHACC (1 << 4)
#define IDR0_TTF GENMASK(3, 2)
#define IDR0_TTF_AARCH64 2
#define IDR0_S1P (1 << 1)
#define IDR0_S2P (1 << 0)

#define ARM_SMMU_IDR1 0x4
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
#define IDR1_ATTR_TYPES_OVR (1 << 27)
#define IDR1_CMDQS GENMASK(25, 21)
#define IDR1_EVTQS GENMASK(20, 16)
#define IDR1_PRIQS GENMASK(15, 11)
#define IDR1_SSIDSIZE GENMASK(10, 6)
#define IDR1_SIDSIZE GENMASK(5, 0)

#define ARM_SMMU_IDR3 0xc
#define IDR3_FWB (1 << 8)
#define IDR3_RIL (1 << 10)
#define IDR3_BBM GENMASK(12, 11)

#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX GENMASK(31, 16)
#define IDR5_GRAN64K (1 << 6)
#define IDR5_GRAN16K (1 << 5)
#define IDR5_GRAN4K (1 << 4)
#define IDR5_OAS GENMASK(2, 0)
#define IDR5_OAS_32_BIT 0
#define IDR5_OAS_36_BIT 1
#define IDR5_OAS_40_BIT 2
#define IDR5_OAS_42_BIT 3
#define IDR5_OAS_44_BIT 4
#define IDR5_OAS_48_BIT 5
#define IDR5_OAS_52_BIT 6
#define IDR5_VAX GENMASK(11, 10)
#define IDR5_VAX_52_BIT 1

#define ARM_SMMU_IIDR 0x18
#define IIDR_PRODUCTID GENMASK(31, 20)
#define IIDR_VARIANT GENMASK(19, 16)
#define IIDR_REVISION GENMASK(15, 12)
#define IIDR_IMPLEMENTER GENMASK(11, 0)

#define ARM_SMMU_AIDR 0x1C

#define ARM_SMMU_CR0 0x20
#define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3)
#define CR0_EVTQEN (1 << 2)
#define CR0_PRIQEN (1 << 1)
#define CR0_SMMUEN (1 << 0)

#define ARM_SMMU_CR0ACK 0x24

#define ARM_SMMU_CR1 0x28
#define CR1_TABLE_SH GENMASK(11, 10)
#define CR1_TABLE_OC GENMASK(9, 8)
#define CR1_TABLE_IC GENMASK(7, 6)
#define CR1_QUEUE_SH GENMASK(5, 4)
#define CR1_QUEUE_OC GENMASK(3, 2)
#define CR1_QUEUE_IC GENMASK(1, 0)
/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
#define CR1_CACHE_NC 0
#define CR1_CACHE_WB 1
#define CR1_CACHE_WT 2

#define ARM_SMMU_CR2 0x2c
#define CR2_PTM (1 << 2)
#define CR2_RECINVSID (1 << 1)
#define CR2_E2H (1 << 0)

#define ARM_SMMU_GBPA 0x44
#define GBPA_UPDATE (1 << 31)
#define GBPA_ABORT (1 << 20)

#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
#define IRQ_CTRL_GERROR_IRQEN (1 << 0)

#define ARM_SMMU_IRQ_CTRLACK 0x54

#define ARM_SMMU_GERROR 0x60
#define GERROR_SFM_ERR (1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
#define GERROR_PRIQ_ABT_ERR (1 << 3)
#define GERROR_EVTQ_ABT_ERR (1 << 2)
#define GERROR_CMDQ_ERR (1 << 0)
#define GERROR_ERR_MASK 0x1fd

#define ARM_SMMU_GERRORN 0x64

#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
#define ARM_SMMU_GERROR_IRQ_CFG2 0x74

#define ARM_SMMU_STRTAB_BASE 0x80
#define STRTAB_BASE_RA (1UL << 62)
#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)

#define ARM_SMMU_STRTAB_BASE_CFG 0x88
#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
#define STRTAB_BASE_CFG_FMT_LINEAR 0
#define STRTAB_BASE_CFG_FMT_2LVL 1
#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)

#define ARM_SMMU_CMDQ_BASE 0x90
#define ARM_SMMU_CMDQ_PROD 0x98
#define ARM_SMMU_CMDQ_CONS 0x9c

#define ARM_SMMU_EVTQ_BASE 0xa0
#define ARM_SMMU_EVTQ_PROD 0xa8
#define ARM_SMMU_EVTQ_CONS 0xac
#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc

#define ARM_SMMU_PRIQ_BASE 0xc0
#define ARM_SMMU_PRIQ_PROD 0xc8
#define ARM_SMMU_PRIQ_CONS 0xcc
#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc

#define ARM_SMMU_REG_SZ 0xe00

/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_SH GENMASK(5, 4)
#define MSI_CFG2_MEMATTR GENMASK(3, 0)

/* Common memory attribute values */
#define ARM_SMMU_SH_NSH 0
#define ARM_SMMU_SH_OSH 2
#define ARM_SMMU_SH_ISH 3
#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
#define ARM_SMMU_MEMATTR_OIWB 0xf

#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
#define Q_OVERFLOW_FLAG (1U << 31)
#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p) ((q)->base + \
		     Q_IDX(&((q)->llq), p) * \
		     (q)->ent_dwords)
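/*
 * Illustrative note: a queue pointer packs an index in bits
 * [max_n_shift - 1:0], a wrap bit at bit max_n_shift and the overflow flag
 * at bit 31. With max_n_shift == 8, for example, a prod value of 0x103 gives
 * Q_IDX() == 0x3 with Q_WRP() set; when prod and cons share the same index,
 * equal wrap bits mean the queue is empty while differing wrap bits mean it
 * is full.
 */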

#define Q_BASE_RWA (1UL << 62)
#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_PAGE_ORDER)
#endif
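/*
 * Note: roughly speaking, this caps a queue at the largest allocation the
 * coherent DMA allocator can return with natural (size) alignment: up to the
 * CMA alignment order when CMA is in use, otherwise MAX_PAGE_ORDER pages.
 */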

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_SPLIT 8

#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)

#define STRTAB_STE_DWORDS 8

struct arm_smmu_ste {
	__le64 data[STRTAB_STE_DWORDS];
};

#define STRTAB_NUM_L2_STES (1 << STRTAB_SPLIT)
struct arm_smmu_strtab_l2 {
	struct arm_smmu_ste stes[STRTAB_NUM_L2_STES];
};

struct arm_smmu_strtab_l1 {
	__le64 l2ptr;
};
#define STRTAB_MAX_L1_ENTRIES (1 << 17)

static inline u32 arm_smmu_strtab_l1_idx(u32 sid)
{
	return sid / STRTAB_NUM_L2_STES;
}

static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
{
	return sid % STRTAB_NUM_L2_STES;
}
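/*
 * Illustrative only: with STRTAB_SPLIT == 8 each L1 descriptor spans 256
 * STEs, so SID 0x1234 selects L1 entry 0x12 and STE 0x34 within that L2
 * table; the bottom 8 bits of the SID index the L2 table and the remaining
 * bits index the L1 table.
 */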

#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT 0
#define STRTAB_STE_0_CFG_BYPASS 4
#define STRTAB_STE_0_CFG_S1_TRANS 5
#define STRTAB_STE_0_CFG_S2_TRANS 6
#define STRTAB_STE_0_CFG_NESTED 7

#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
#define STRTAB_STE_0_S1FMT_LINEAR 0
#define STRTAB_STE_0_S1FMT_64K_L2 2
#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)

#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0)
#define STRTAB_STE_1_S1DSS_TERMINATE 0x0
#define STRTAB_STE_1_S1DSS_BYPASS 0x1
#define STRTAB_STE_1_S1DSS_SSID0 0x2

#define STRTAB_STE_1_S1C_CACHE_NC 0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
#define STRTAB_STE_1_S1C_CACHE_WT 2UL
#define STRTAB_STE_1_S1C_CACHE_WB 3UL
#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)

#define STRTAB_STE_1_MEV (1UL << 19)
#define STRTAB_STE_1_S2FWB (1UL << 25)
#define STRTAB_STE_1_S1STALLD (1UL << 27)

#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
#define STRTAB_STE_1_EATS_ABT 0UL
#define STRTAB_STE_1_EATS_TRANS 1UL
#define STRTAB_STE_1_EATS_S1CHK 2UL

#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
#define STRTAB_STE_1_STRW_NSEL1 0UL
#define STRTAB_STE_1_STRW_EL2 2UL

#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING 1UL

#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0)
#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6)
#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8)
#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10)
#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12)
#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14)
#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16)
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
#define STRTAB_STE_2_S2S (1UL << 57)
#define STRTAB_STE_2_S2R (1UL << 58)

#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)

/* These bits can be controlled by userspace for STRTAB_STE_0_CFG_NESTED */
#define STRTAB_STE_0_NESTING_ALLOWED \
	cpu_to_le64(STRTAB_STE_0_V | STRTAB_STE_0_CFG | STRTAB_STE_0_S1FMT | \
		    STRTAB_STE_0_S1CTXPTR_MASK | STRTAB_STE_0_S1CDMAX)
#define STRTAB_STE_1_NESTING_ALLOWED \
	cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR | \
		    STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH | \
		    STRTAB_STE_1_S1STALLD | STRTAB_STE_1_EATS)

/*
 * Context descriptors.
 *
 * Linear: when less than 1024 SSIDs are supported
 * 2lvl: at most 1024 L1 entries,
 *       1024 lazy entries per table.
 */
#define CTXDESC_L2_ENTRIES 1024

#define CTXDESC_L1_DESC_V (1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)

#define CTXDESC_CD_DWORDS 8

struct arm_smmu_cd {
	__le64 data[CTXDESC_CD_DWORDS];
};

struct arm_smmu_cdtab_l2 {
	struct arm_smmu_cd cds[CTXDESC_L2_ENTRIES];
};

struct arm_smmu_cdtab_l1 {
	__le64 l2ptr;
};

static inline unsigned int arm_smmu_cdtab_l1_idx(unsigned int ssid)
{
	return ssid / CTXDESC_L2_ENTRIES;
}

static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
{
	return ssid % CTXDESC_L2_ENTRIES;
}
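/*
 * Illustrative only: CD tables split on a 1024-entry boundary instead, so
 * SSID 0x1803 (6147) lands in L1 entry 6, CD 3 of that L2 table.
 */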

#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)

#define CTXDESC_CD_0_ENDI (1UL << 15)
#define CTXDESC_CD_0_V (1UL << 31)

#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)

#define CTXDESC_CD_0_TCR_HA (1UL << 43)
#define CTXDESC_CD_0_TCR_HD (1UL << 42)

#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
#define CTXDESC_CD_0_ASET (1UL << 47)
#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)

#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)

/*
 * When the SMMU only supports linear context descriptor tables, pick a
 * reasonable size limit (64kB).
 */
#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / sizeof(struct arm_smmu_cd))
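/*
 * Illustrative only: a CD is CTXDESC_CD_DWORDS * 8 = 64 bytes, so the 64kB
 * cap works out to 1024 entries and CTXDESC_LINEAR_CDMAX == 10.
 */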

/* Command queue */
#define CMDQ_ENT_SZ_SHIFT 4
#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)

#define CMDQ_CONS_ERR GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2
#define CMDQ_ERR_CERROR_ATC_INV_IDX 3

#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG

/*
 * This is used to size the command queue and therefore must be at least
 * BITS_PER_LONG so that the valid_map works correctly (it relies on the
 * total number of queue entries being a multiple of BITS_PER_LONG).
 */
#define CMDQ_BATCH_ENTRIES BITS_PER_LONG

#define CMDQ_0_OP GENMASK_ULL(7, 0)
#define CMDQ_0_SSV (1UL << 11)

#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)

#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)

#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
#define CMDQ_TLBI_RANGE_NUM_MAX 31
#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF (1UL << 0)
#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)

#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
#define CMDQ_ATC_0_GLOBAL (1UL << 9)
#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)

#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)

#define CMDQ_RESUME_0_RESP_TERM 0UL
#define CMDQ_RESUME_0_RESP_RETRY 1UL
#define CMDQ_RESUME_0_RESP_ABORT 2UL
#define CMDQ_RESUME_0_RESP GENMASK_ULL(13, 12)
#define CMDQ_RESUME_0_SID GENMASK_ULL(63, 32)
#define CMDQ_RESUME_1_STAG GENMASK_ULL(15, 0)

#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
#define CMDQ_SYNC_0_CS_NONE 0
#define CMDQ_SYNC_0_CS_IRQ 1
#define CMDQ_SYNC_0_CS_SEV 2
#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)

/* Event queue */
#define EVTQ_ENT_SZ_SHIFT 5
#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)

#define EVTQ_0_ID GENMASK_ULL(7, 0)

#define EVT_ID_BAD_STREAMID_CONFIG 0x02
#define EVT_ID_STE_FETCH_FAULT 0x03
#define EVT_ID_BAD_STE_CONFIG 0x04
#define EVT_ID_STREAM_DISABLED_FAULT 0x06
#define EVT_ID_BAD_SUBSTREAMID_CONFIG 0x08
#define EVT_ID_CD_FETCH_FAULT 0x09
#define EVT_ID_BAD_CD_CONFIG 0x0a
#define EVT_ID_TRANSLATION_FAULT 0x10
#define EVT_ID_ADDR_SIZE_FAULT 0x11
#define EVT_ID_ACCESS_FAULT 0x12
#define EVT_ID_PERMISSION_FAULT 0x13
#define EVT_ID_VMS_FETCH_FAULT 0x25

#define EVTQ_0_SSV (1UL << 11)
#define EVTQ_0_SSID GENMASK_ULL(31, 12)
#define EVTQ_0_SID GENMASK_ULL(63, 32)
#define EVTQ_1_STAG GENMASK_ULL(15, 0)
#define EVTQ_1_STALL (1UL << 31)
#define EVTQ_1_PnU (1UL << 33)
#define EVTQ_1_InD (1UL << 34)
#define EVTQ_1_RnW (1UL << 35)
#define EVTQ_1_S2 (1UL << 39)
#define EVTQ_1_CLASS GENMASK_ULL(41, 40)
#define EVTQ_1_CLASS_TT 0x01
#define EVTQ_1_TT_READ (1UL << 44)
#define EVTQ_2_ADDR GENMASK_ULL(63, 0)
#define EVTQ_3_IPA GENMASK_ULL(51, 12)
#define EVTQ_3_FETCH_ADDR GENMASK_ULL(51, 3)

/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)

#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
#define PRIQ_0_PERM_PRIV (1UL << 58)
#define PRIQ_0_PERM_EXEC (1UL << 59)
#define PRIQ_0_PERM_READ (1UL << 60)
#define PRIQ_0_PERM_WRITE (1UL << 61)
#define PRIQ_0_PRG_LAST (1UL << 62)
#define PRIQ_0_SSID_V (1UL << 63)

#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
#define ARM_SMMU_POLL_SPIN_COUNT 10

#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000

enum pri_resp {
	PRI_RESP_DENY = 0,
	PRI_RESP_FAIL = 1,
	PRI_RESP_SUCC = 2,
};
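/*
 * Note: these are the response codes returned to a PRI-capable endpoint via
 * CMDQ_OP_PRI_RESP, encoded in the CMDQ_PRI_1_RESP field above.
 */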

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8 opcode;
	bool substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG 0x1
		struct {
			u32 sid;
		} prefetch;

		#define CMDQ_OP_CFGI_STE 0x3
		#define CMDQ_OP_CFGI_ALL 0x4
		#define CMDQ_OP_CFGI_CD 0x5
		#define CMDQ_OP_CFGI_CD_ALL 0x6
		struct {
			u32 sid;
			u32 ssid;
			union {
				bool leaf;
				u8 span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ALL 0x10
		#define CMDQ_OP_TLBI_NH_ASID 0x11
		#define CMDQ_OP_TLBI_NH_VA 0x12
		#define CMDQ_OP_TLBI_NH_VAA 0x13
		#define CMDQ_OP_TLBI_EL2_ALL 0x20
		#define CMDQ_OP_TLBI_EL2_ASID 0x21
		#define CMDQ_OP_TLBI_EL2_VA 0x22
		#define CMDQ_OP_TLBI_S12_VMALL 0x28
		#define CMDQ_OP_TLBI_S2_IPA 0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL 0x30
		struct {
			u8 num;
			u8 scale;
			u16 asid;
			u16 vmid;
			bool leaf;
			u8 ttl;
			u8 tg;
			u64 addr;
		} tlbi;

		#define CMDQ_OP_ATC_INV 0x40
		#define ATC_INV_SIZE_ALL 52
		struct {
			u32 sid;
			u32 ssid;
			u64 addr;
			u8 size;
			bool global;
		} atc;

		#define CMDQ_OP_PRI_RESP 0x41
		struct {
			u32 sid;
			u32 ssid;
			u16 grpid;
			enum pri_resp resp;
		} pri;

		#define CMDQ_OP_RESUME 0x44
		struct {
			u32 sid;
			u16 stag;
			u8 resp;
		} resume;

		#define CMDQ_OP_CMD_SYNC 0x46
		struct {
			u64 msiaddr;
		} sync;
	};
};
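/*
 * Illustrative only (not part of the original header): a stage-1 TLB
 * invalidation by VA is typically built as
 *
 *	struct arm_smmu_cmdq_ent cmd = {
 *		.opcode = CMDQ_OP_TLBI_NH_VA,
 *		.tlbi = {
 *			.asid = asid,
 *			.leaf = leaf,
 *			.addr = iova,
 *		},
 *	};
 *
 * and is then encoded into the CMDQ_0_OP/CMDQ_TLBI_* fields before being
 * pushed onto the command queue, usually followed by a CMD_SYNC.
 */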

struct arm_smmu_ll_queue {
	union {
		u64 val;
		struct {
			u32 prod;
			u32 cons;
		};
		struct {
			atomic_t prod;
			atomic_t cons;
		} atomic;
		u8 __pad[SMP_CACHE_BYTES];
	} ____cacheline_aligned_in_smp;
	u32 max_n_shift;
};

struct arm_smmu_queue {
	struct arm_smmu_ll_queue llq;
	int irq; /* Wired interrupt */

	__le64 *base;
	dma_addr_t base_dma;
	u64 q_base;

	size_t ent_dwords;

	u32 __iomem *prod_reg;
	u32 __iomem *cons_reg;
};

struct arm_smmu_queue_poll {
	ktime_t timeout;
	unsigned int delay;
	unsigned int spin_cnt;
	bool wfe;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue q;
	atomic_long_t *valid_map;
	atomic_t owner_prod;
	atomic_t lock;
	bool (*supports_cmd)(struct arm_smmu_cmdq_ent *ent);
};

static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
					      struct arm_smmu_cmdq_ent *ent)
{
	return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
}
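/*
 * Note: supports_cmd is only expected to be set for implementation-specific
 * secondary command queues (see impl_ops->get_secondary_cmdq below); the
 * main command queue normally leaves it NULL and so accepts every command.
 */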

struct arm_smmu_cmdq_batch {
	u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
	struct arm_smmu_cmdq *cmdq;
	int num;
};
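/*
 * Note: a batch accumulates up to CMDQ_BATCH_ENTRIES commands (num counts
 * them) so they can be submitted to the command queue in a single call to
 * arm_smmu_cmdq_issue_cmdlist().
 */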

struct arm_smmu_evtq {
	struct arm_smmu_queue q;
	struct iopf_queue *iopf;
	u32 max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_ctx_desc {
	u16 asid;
};

struct arm_smmu_ctx_desc_cfg {
	union {
		struct {
			struct arm_smmu_cd *table;
			unsigned int num_ents;
		} linear;
		struct {
			struct arm_smmu_cdtab_l1 *l1tab;
			struct arm_smmu_cdtab_l2 **l2ptrs;
			unsigned int num_l1_ents;
		} l2;
	};
	dma_addr_t cdtab_dma;
	unsigned int used_ssids;
	u8 in_ste;
	u8 s1fmt;
	/* log2 of the maximum number of CDs supported by this table */
	u8 s1cdmax;
};

static inline bool
arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg)
{
	return cfg->linear.table || cfg->l2.l1tab;
}

/* True if the cd table has SSIDs > 0 in use. */
static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
{
	return cd_table->used_ssids;
}

struct arm_smmu_s2_cfg {
	u16 vmid;
};

struct arm_smmu_strtab_cfg {
	union {
		struct {
			struct arm_smmu_ste *table;
			dma_addr_t ste_dma;
			unsigned int num_ents;
		} linear;
		struct {
			struct arm_smmu_strtab_l1 *l1tab;
			struct arm_smmu_strtab_l2 **l2ptrs;
			dma_addr_t l1_dma;
			unsigned int num_l1_ents;
		} l2;
	};
};
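/*
 * Note: only one union member is in use at a time, corresponding to the
 * STRTAB_BASE_CFG_FMT_LINEAR / _2LVL format programmed into
 * ARM_SMMU_STRTAB_BASE_CFG.
 */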

struct arm_smmu_impl_ops {
	int (*device_reset)(struct arm_smmu_device *smmu);
	void (*device_remove)(struct arm_smmu_device *smmu);
	int (*init_structures)(struct arm_smmu_device *smmu);
	struct arm_smmu_cmdq *(*get_secondary_cmdq)(
		struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
	/*
	 * An implementation should define its own type, distinct from the
	 * default IOMMU_HW_INFO_TYPE_ARM_SMMUV3, and must validate the input
	 * @type before returning its own structure.
	 */
	void *(*hw_info)(struct arm_smmu_device *smmu, u32 *length,
			 enum iommu_hw_info_type *type);
	size_t (*get_viommu_size)(enum iommu_viommu_type viommu_type);
	int (*vsmmu_init)(struct arm_vsmmu *vsmmu,
			  const struct iommu_user_data *user_data);
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device *dev;
	struct device *impl_dev;
	const struct arm_smmu_impl_ops *impl_ops;

	void __iomem *base;
	void __iomem *page1;

#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
#define ARM_SMMU_FEAT_TT_LE (1 << 2)
#define ARM_SMMU_FEAT_TT_BE (1 << 3)
#define ARM_SMMU_FEAT_PRI (1 << 4)
#define ARM_SMMU_FEAT_ATS (1 << 5)
#define ARM_SMMU_FEAT_SEV (1 << 6)
#define ARM_SMMU_FEAT_MSI (1 << 7)
#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
#define ARM_SMMU_FEAT_VAX (1 << 14)
#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
#define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
#define ARM_SMMU_FEAT_HA (1 << 21)
#define ARM_SMMU_FEAT_HD (1 << 22)
#define ARM_SMMU_FEAT_S2FWB (1 << 23)
#define ARM_SMMU_FEAT_BBML2 (1 << 24)
	u32 features;

#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
#define ARM_SMMU_OPT_TEGRA241_CMDQV (1 << 4)
	u32 options;

	struct arm_smmu_cmdq cmdq;
	struct arm_smmu_evtq evtq;
	struct arm_smmu_priq priq;

	int gerr_irq;
	int combined_irq;

	unsigned long oas; /* PA */
	unsigned long pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS (1 << 16)
	unsigned int asid_bits;

#define ARM_SMMU_MAX_VMIDS (1 << 16)
	unsigned int vmid_bits;
	struct ida vmid_map;

	unsigned int ssid_bits;
	unsigned int sid_bits;

	struct arm_smmu_strtab_cfg strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device iommu;

	struct rb_root streams;
	struct mutex streams_mutex;
};

struct arm_smmu_stream {
	u32 id;
	struct arm_smmu_master *master;
	struct rb_node node;
};

struct arm_smmu_vmaster {
	struct arm_vsmmu *vsmmu;
	unsigned long vsid;
};

struct arm_smmu_event {
	u8 stall : 1,
	   ssv : 1,
	   privileged : 1,
	   instruction : 1,
	   s2 : 1,
	   read : 1,
	   ttrnw : 1,
	   class_tt : 1;
	u8 id;
	u8 class;
	u16 stag;
	u32 sid;
	u32 ssid;
	u64 iova;
	u64 ipa;
	u64 fetch_addr;
	struct device *dev;
};

/* SMMU private data for each master */
struct arm_smmu_master {
	struct arm_smmu_device *smmu;
	struct device *dev;
	struct arm_smmu_stream *streams;
	struct arm_smmu_vmaster *vmaster; /* use smmu->streams_mutex */
	/* Locked by the iommu core using the group mutex */
	struct arm_smmu_ctx_desc_cfg cd_table;
	unsigned int num_streams;
	bool ats_enabled : 1;
	bool ste_ats_enabled : 1;
	bool stall_enabled;
	unsigned int ssid_bits;
	unsigned int iopf_refcount;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;

	struct io_pgtable_ops *pgtbl_ops;
	atomic_t nr_ats_masters;

	enum arm_smmu_domain_stage stage;
	union {
		struct arm_smmu_ctx_desc cd;
		struct arm_smmu_s2_cfg s2_cfg;
	};

	struct iommu_domain domain;

	/* List of struct arm_smmu_master_domain */
	struct list_head devices;
	spinlock_t devices_lock;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;

	struct mmu_notifier mmu_notifier;
};

struct arm_smmu_nested_domain {
	struct iommu_domain domain;
	struct arm_vsmmu *vsmmu;
	bool enable_ats : 1;

	__le64 ste[2];
};
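/*
 * Note: ste[] holds the guest-provided STE words for the nested domain,
 * validated against the STRTAB_STE_0/1_NESTING_ALLOWED masks defined above.
 */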

/* The following are exposed for testing purposes. */
struct arm_smmu_entry_writer_ops;
struct arm_smmu_entry_writer {
	const struct arm_smmu_entry_writer_ops *ops;
	struct arm_smmu_master *master;
};

struct arm_smmu_entry_writer_ops {
	void (*get_used)(const __le64 *entry, __le64 *used);
	void (*get_update_safe)(const __le64 *cur, const __le64 *target,
				__le64 *safe_bits);
	void (*sync)(struct arm_smmu_entry_writer *writer);
};

void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
				 struct arm_smmu_master *master,
				 struct arm_smmu_domain *smmu_domain,
				 bool ats_enabled);

#if IS_ENABLED(CONFIG_KUNIT)
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
void arm_smmu_get_ste_update_safe(const __le64 *cur, const __le64 *target,
				  __le64 *safe_bits);
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
			  const __le64 *target);
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
			      struct arm_smmu_ste *target);
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
			       struct arm_smmu_master *master, bool ats_enabled,
			       unsigned int s1dss);
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
			  struct arm_smmu_master *master, struct mm_struct *mm,
			  u16 asid);
#endif

struct arm_smmu_master_domain {
	struct list_head devices_elm;
	struct arm_smmu_master *master;
	/*
	 * For nested domains the master_domain is threaded onto the S2 parent;
	 * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
	 */
	struct iommu_domain *domain;
	ioasid_t ssid;
	bool nested_ats_flush : 1;
	bool using_iopf : 1;
};

static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static inline struct arm_smmu_nested_domain *
to_smmu_nested_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_nested_domain, domain);
}

extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;

struct arm_smmu_domain *arm_smmu_domain_alloc(void);

void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
					u32 ssid);
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
			 struct arm_smmu_master *master,
			 struct arm_smmu_domain *smmu_domain);
void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
			     struct arm_smmu_cd *cdptr,
			     const struct arm_smmu_cd *target);

int arm_smmu_set_pasid(struct arm_smmu_master *master,
		       struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
		       struct arm_smmu_cd *cd, struct iommu_domain *old);

void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
				 size_t granule, bool leaf,
				 struct arm_smmu_domain *smmu_domain);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
			    unsigned long iova, size_t size);

void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
			      struct arm_smmu_cmdq *cmdq);
int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue *q, void __iomem *page,
			    unsigned long prod_off, unsigned long cons_off,
			    size_t dwords, const char *name);
int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
		       struct arm_smmu_cmdq *cmdq);

static inline bool arm_smmu_master_canwbs(struct arm_smmu_master *master)
{
	return dev_iommu_fwspec_get(master->dev)->flags &
	       IOMMU_FWSPEC_PCI_RC_CANWBS;
}
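/*
 * Note (assumption): IOMMU_FWSPEC_PCI_RC_CANWBS indicates that firmware has
 * declared the PCIe root complex makes this master's DMA cacheable and
 * coherent (write-back shareable); the helper above is typically consulted
 * when deciding whether a domain's enforce_cache_coherency can be honoured.
 */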

struct arm_smmu_attach_state {
	/* Inputs */
	struct iommu_domain *old_domain;
	struct arm_smmu_master *master;
	bool cd_needs_ats;
	bool disable_ats;
	ioasid_t ssid;
	/* Resulting state */
	struct arm_smmu_vmaster *vmaster;
	bool ats_enabled;
};

int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
			    struct iommu_domain *new_domain);
void arm_smmu_attach_commit(struct arm_smmu_attach_state *state);
void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
				  const struct arm_smmu_ste *target);

int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
				struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
				bool sync);

#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
					       struct mm_struct *mm);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	return false;
}

static inline void arm_smmu_sva_notifier_synchronize(void) {}

#define arm_smmu_sva_domain_alloc NULL

#endif /* CONFIG_ARM_SMMU_V3_SVA */

#ifdef CONFIG_TEGRA241_CMDQV
struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu);
#else /* CONFIG_TEGRA241_CMDQV */
static inline struct arm_smmu_device *
tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_TEGRA241_CMDQV */

struct arm_vsmmu {
	struct iommufd_viommu core;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *s2_parent;
	u16 vmid;
};

#if IS_ENABLED(CONFIG_ARM_SMMU_V3_IOMMUFD)
void *arm_smmu_hw_info(struct device *dev, u32 *length,
		       enum iommu_hw_info_type *type);
size_t arm_smmu_get_viommu_size(struct device *dev,
				enum iommu_viommu_type viommu_type);
int arm_vsmmu_init(struct iommufd_viommu *viommu,
		   struct iommu_domain *parent_domain,
		   const struct iommu_user_data *user_data);
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
				    struct arm_smmu_nested_domain *nested_domain);
void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state);
void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master);
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt);
struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data);
int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
			       struct iommu_user_data_array *array);
#else
#define arm_smmu_get_viommu_size NULL
#define arm_smmu_hw_info NULL
#define arm_vsmmu_init NULL
#define arm_vsmmu_alloc_domain_nested NULL
#define arm_vsmmu_cache_invalidate NULL

static inline int
arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
				struct arm_smmu_nested_domain *nested_domain)
{
	return 0;
}

static inline void
arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
{
}

static inline void
arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
{
}

static inline int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster,
					   u64 *evt)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_ARM_SMMU_V3_IOMMUFD */

#endif /* _ARM_SMMU_V3_H */