// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

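/*
 * The NPU integrates an Arm SMMUv3-compatible MMU; the register offsets and
 * data structure layouts below (IDR/CR/GERROR registers, stream table,
 * command and event queues) follow the SMMUv3 programming model.
 */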
#define IVPU_MMU_REG_IDR0		      0x00200000u
#define IVPU_MMU_REG_IDR1		      0x00200004u
#define IVPU_MMU_REG_IDR3		      0x0020000cu
#define IVPU_MMU_REG_IDR5		      0x00200014u
#define IVPU_MMU_REG_CR0		      0x00200020u
#define IVPU_MMU_REG_CR0ACK		      0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK	      GENMASK(31, 0)
#define IVPU_MMU_REG_CR0_ATSCHK_MASK	      BIT(4)
#define IVPU_MMU_REG_CR0_CMDQEN_MASK	      BIT(3)
#define IVPU_MMU_REG_CR0_EVTQEN_MASK	      BIT(2)
#define IVPU_MMU_REG_CR0_PRIQEN_MASK	      BIT(1)
#define IVPU_MMU_REG_CR0_SMMUEN_MASK	      BIT(0)

#define IVPU_MMU_REG_CR1		      0x00200028u
#define IVPU_MMU_REG_CR2		      0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL		      0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK	      0x00200054u
#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK     GENMASK(31, 0)

#define IVPU_MMU_REG_GERROR		      0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK	      BIT_MASK(0)
#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK     BIT_MASK(2)
#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK     BIT_MASK(3)
#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK      BIT_MASK(7)

#define IVPU_MMU_REG_GERRORN		      0x00200064u

#define IVPU_MMU_REG_STRTAB_BASE	      0x00200080u
#define IVPU_MMU_REG_STRTAB_BASE_CFG	      0x00200088u
#define IVPU_MMU_REG_CMDQ_BASE		      0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD		      0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS		      0x0020009cu
#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK	      GENMASK(23, 0)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK	      GENMASK(30, 24)
#define IVPU_MMU_REG_EVTQ_BASE		      0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD		      0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS		      0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC	      (0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC	      (0x002000acu + SZ_64K)

#define IVPU_MMU_IDR0_REF		0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
#define IVPU_MMU_IDR1_REF		0x0e739d18
#define IVPU_MMU_IDR3_REF		0x0000003c
#define IVPU_MMU_IDR5_REF		0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS	0x00000075
#define IVPU_MMU_IDR5_REF_FPGA		0x00800075

#define IVPU_MMU_CDTAB_ENT_SIZE		64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2	8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT	((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)

#define IVPU_MMU_STREAM_ID0		0
#define IVPU_MMU_STREAM_ID3		3

#define IVPU_MMU_STRTAB_ENT_SIZE	64
#define IVPU_MMU_STRTAB_ENT_COUNT	4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE	2
#define IVPU_MMU_STRTAB_CFG		IVPU_MMU_STRTAB_CFG_LOG2SIZE

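/*
 * Queue producer/consumer values carry one wrap bit above the index bits,
 * as in the SMMUv3 spec: bits [IVPU_MMU_Q_COUNT_LOG2 - 1:0] index the queue
 * and the next bit toggles on every wrap-around. Equal index and equal wrap
 * bit means the queue is empty; equal index but different wrap bit means it
 * is full. For example, prod == 0x12 and cons == 0x02 both point at slot 2,
 * but the differing wrap bits mark the queue as full.
 */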
#define IVPU_MMU_Q_COUNT_LOG2		4 /* 16 entries */
#define IVPU_MMU_Q_COUNT		((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_MASK		GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
#define IVPU_MMU_Q_IDX_MASK		(IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val)		((val) & IVPU_MMU_Q_IDX_MASK)
#define IVPU_MMU_Q_WRP(val)		((val) & IVPU_MMU_Q_COUNT)

#define IVPU_MMU_CMDQ_CMD_SIZE		16
#define IVPU_MMU_CMDQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)

#define IVPU_MMU_EVTQ_CMD_SIZE		32
#define IVPU_MMU_EVTQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)

#define IVPU_MMU_CMD_OPCODE		GENMASK(7, 0)

#define IVPU_MMU_CMD_SYNC_0_CS		GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH		GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR	GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA	GENMASK_ULL(63, 32)

#define IVPU_MMU_CMD_CFGI_0_SSEC	BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV		BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID	GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID		GENMASK_ULL(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE	GENMASK(4, 0)

#define IVPU_MMU_CMD_TLBI_0_ASID	GENMASK_ULL(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID	GENMASK_ULL(47, 32)

#define CMD_PREFETCH_CFG		0x1
#define CMD_CFGI_STE			0x3
#define CMD_CFGI_ALL			0x4
#define CMD_CFGI_CD			0x5
#define CMD_CFGI_CD_ALL			0x6
#define CMD_TLBI_NH_ASID		0x11
#define CMD_TLBI_EL2_ALL		0x20
#define CMD_TLBI_NSNH_ALL		0x30
#define CMD_SYNC			0x46

#define IVPU_MMU_EVT_F_UUT		0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID	0x02
#define IVPU_MMU_EVT_F_STE_FETCH	0x03
#define IVPU_MMU_EVT_C_BAD_STE		0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ	0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED	0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN	0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID	0x08
#define IVPU_MMU_EVT_F_CD_FETCH		0x09
#define IVPU_MMU_EVT_C_BAD_CD		0x0a
#define IVPU_MMU_EVT_F_WALK_EABT	0x0b
#define IVPU_MMU_EVT_F_TRANSLATION	0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE	0x11
#define IVPU_MMU_EVT_F_ACCESS		0x12
#define IVPU_MMU_EVT_F_PERMISSION	0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT	0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT	0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST	0x24
#define IVPU_MMU_EVT_F_VMS_FETCH	0x25

#define IVPU_MMU_EVT_OP_MASK		GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK		GENMASK_ULL(31, 12)

#define IVPU_MMU_Q_BASE_RWA		BIT_ULL(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK	GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA		BIT_ULL(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK	GENMASK_ULL(51, 6)

#define IVPU_MMU_IRQ_EVTQ_EN		BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN		BIT(0)

#define IVPU_MMU_CR1_TABLE_SH		GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC		GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC		GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH		GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC		GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC		GENMASK(1, 0)
#define IVPU_MMU_CACHE_NC		0
#define IVPU_MMU_CACHE_WB		1
#define IVPU_MMU_CACHE_WT		2
#define IVPU_MMU_SH_NSH			0
#define IVPU_MMU_SH_OSH			2
#define IVPU_MMU_SH_ISH			3

#define IVPU_MMU_CMDQ_OP		GENMASK_ULL(7, 0)

#define IVPU_MMU_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0		BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1		BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI		BIT(15)
#define IVPU_MMU_CD_0_V			BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0		BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64		BIT_ULL(41)
#define IVPU_MMU_CD_0_S			BIT_ULL(44)
#define IVPU_MMU_CD_0_R			BIT_ULL(45)
#define IVPU_MMU_CD_0_A			BIT_ULL(46)
#define IVPU_MMU_CD_0_ASET		BIT_ULL(47)
#define IVPU_MMU_CD_0_ASID		GENMASK_ULL(63, 48)

#define IVPU_MMU_T0SZ_48BIT		16
#define IVPU_MMU_T0SZ_38BIT		26

#define IVPU_MMU_IPS_48BIT		5
#define IVPU_MMU_IPS_44BIT		4
#define IVPU_MMU_IPS_42BIT		3
#define IVPU_MMU_IPS_40BIT		2
#define IVPU_MMU_IPS_36BIT		1
#define IVPU_MMU_IPS_32BIT		0

#define IVPU_MMU_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

#define IVPU_MMU_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR	0
#define IVPU_MMU_STE_DWORDS		8
#define IVPU_MMU_STE_0_CFG_S1_TRANS	5
#define IVPU_MMU_STE_0_CFG		GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V		BIT(0)

#define IVPU_MMU_STE_1_STRW_NSEL1	0ul
#define IVPU_MMU_STE_1_CONT		GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW		GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG		GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV	2ul
#define IVPU_MMU_STE_1_INSTCFG		GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA	2ul
#define IVPU_MMU_STE_1_MEV		BIT(19)
#define IVPU_MMU_STE_1_S1STALLD		BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC	0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA	1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT	2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB	3ul
#define IVPU_MMU_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH		GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS		GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE	0x0

#define IVPU_MMU_REG_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US	(100 * USEC_PER_MSEC)

#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))

#define IVPU_MMU_CERROR_NONE		0x0
#define IVPU_MMU_CERROR_ILL		0x1
#define IVPU_MMU_CERROR_ABT		0x2
#define IVPU_MMU_CERROR_ATC_INV_SYNC	0x3

static const char *ivpu_mmu_event_to_str(u32 cmd)
{
	switch (cmd) {
	case IVPU_MMU_EVT_F_UUT:
		return "Unsupported Upstream Transaction";
	case IVPU_MMU_EVT_C_BAD_STREAMID:
		return "Transaction StreamID out of range";
	case IVPU_MMU_EVT_F_STE_FETCH:
		return "Fetch of STE caused external abort";
	case IVPU_MMU_EVT_C_BAD_STE:
		return "Used STE invalid";
	case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
		return "Address Request disallowed for a StreamID";
	case IVPU_MMU_EVT_F_STREAM_DISABLED:
		return "Transaction marks non-substream disabled";
	case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
		return "MMU bypass is disallowed for this StreamID";
	case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
		return "Invalid SubstreamID";
	case IVPU_MMU_EVT_F_CD_FETCH:
		return "Fetch of CD caused external abort";
	case IVPU_MMU_EVT_C_BAD_CD:
		return "Fetched CD invalid";
	case IVPU_MMU_EVT_F_WALK_EABT:
		return "An external abort occurred fetching a TLB";
	case IVPU_MMU_EVT_F_TRANSLATION:
		return "Translation fault";
	case IVPU_MMU_EVT_F_ADDR_SIZE:
		return "Output address caused address size fault";
	case IVPU_MMU_EVT_F_ACCESS:
		return "Access flag fault";
	case IVPU_MMU_EVT_F_PERMISSION:
		return "Permission fault occurred on page access";
	case IVPU_MMU_EVT_F_TLB_CONFLICT:
		return "A TLB conflict";
	case IVPU_MMU_EVT_F_CFG_CONFLICT:
		return "A configuration cache conflict";
	case IVPU_MMU_EVT_E_PAGE_REQUEST:
		return "Page request hint from a client device";
	case IVPU_MMU_EVT_F_VMS_FETCH:
		return "Fetch of VMS caused external abort";
	default:
		return "Unknown event";
	}
}

static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
	switch (err) {
	case IVPU_MMU_CERROR_NONE:
		return "No error";
	case IVPU_MMU_CERROR_ILL:
		return "Illegal command";
	case IVPU_MMU_CERROR_ABT:
		return "External abort on command queue read";
	case IVPU_MMU_CERROR_ATC_INV_SYNC:
		return "Sync failed to complete ATS invalidation";
	default:
		return "Unknown error";
	}
}

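/*
 * Sanity-check the MMU identification registers against the values expected
 * for silicon, SIMICS and FPGA platforms; mismatches are only reported at
 * debug level as they are informational, not fatal.
 */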
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
	u32 val_ref;
	u32 val;

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR0_REF_SIMICS;
	else
		val_ref = IVPU_MMU_IDR0_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR0);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

	val = REGV_RD32(IVPU_MMU_REG_IDR1);
	if (val != IVPU_MMU_IDR1_REF)
		ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

	val = REGV_RD32(IVPU_MMU_REG_IDR3);
	if (val != IVPU_MMU_IDR3_REF)
		ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR5_REF_SIMICS;
	else if (ivpu_is_fpga(vdev))
		val_ref = IVPU_MMU_IDR5_REF_FPGA;
	else
		val_ref = IVPU_MMU_IDR5_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR5);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}

static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;

	cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
	if (!cdtab->base)
		return -ENOMEM;

	ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);

	return 0;
}

static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;

	strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
	if (!strtab->base)
		return -ENOMEM;

	strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
	strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
	strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;

	ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
		 &strtab->dma, &strtab->dma_q, size);

	return 0;
}

static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->cmdq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

	return 0;
}

static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->evtq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);

	return 0;
}

static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cdtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_strtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cmdq_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_evtq_alloc(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

	return ret;
}

static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_CR0, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
	u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
	int ret;

	ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
	if (ret)
		return ret;

	return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
}

static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	int ret;

	ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
			    IVPU_MMU_QUEUE_TIMEOUT_US);
	if (ret)
		return ret;

	cmdq->cons = cmdq->prod;

	return 0;
}

static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
}

static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
}

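/*
 * Each command occupies IVPU_MMU_CMDQ_CMD_SIZE (16) bytes, i.e. two u64
 * slots, so the queue index is scaled by two before data0/data1 are stored.
 */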
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	u64 *queue_buffer = cmdq->base;
	int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

	if (ivpu_mmu_queue_is_full(cmdq)) {
		ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
		return -EBUSY;
	}

	queue_buffer[idx] = data0;
	queue_buffer[idx + 1] = data1;
	cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

	ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);

	return 0;
}

static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
	u64 val;
	int ret;

	val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);

	ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
	if (ret)
		return ret;

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

	ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
	if (ret) {
		u32 err;

		val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
		err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);

		ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
			 ivpu_mmu_cmdq_err_to_str(err));
		ivpu_hw_diagnose_failure(vdev);
	}

	return ret;
}

static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
	u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
	u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);

	return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}

static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
		  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}

static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}

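/*
 * Bring the MMU to a known state: zero both queues, program the CR1
 * cacheability/shareability attributes and the stream table and queue base
 * registers, then step-wise enable CMDQ, EVTQ, ATS checking and finally the
 * SMMU itself, waiting for CR0ACK to confirm each CR0 update.
 */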
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	u32 val;
	int ret;

	memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
	mmu->cmdq.prod = 0;
	mmu->cmdq.cons = 0;

	memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
	mmu->evtq.prod = 0;
	mmu->evtq.cons = 0;

	ret = ivpu_mmu_reg_write_cr0(vdev, 0);
	if (ret)
		return ret;

	val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
	REGV_WR32(IVPU_MMU_REG_CR1, val);

	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
	REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);

	val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		return ret;

	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
	REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);

	val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_irqs_setup(vdev);
	if (ret)
		return ret;

	val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
	return ivpu_mmu_reg_write_cr0(vdev, val);
}

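/*
 * Write the stream table entry for @sid, pointing it at the shared context
 * descriptor table. Dword 1 is written before dword 0 so that the valid bit
 * (IVPU_MMU_STE_0_V) only becomes observable once the rest of the entry is
 * in place.
 */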
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
	u64 str[2];

	str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
		 IVPU_MMU_STE_0_V |
		 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

	str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
		 FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
		 FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
		 FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
		 FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
		 IVPU_MMU_STE_1_MEV |
		 IVPU_MMU_STE_1_S1STALLD;

	WRITE_ONCE(entry[1], str[1]);
	WRITE_ONCE(entry[0], str[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}

static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);

	return 0;
}

int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret = 0;

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}

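/*
 * Program (or clear) the context descriptor for @ssid. Dwords 1-3 are
 * written first and dword 0, which carries the valid bit, last, so that a
 * concurrent table walk never sees a half-initialized descriptor. The
 * following CFGI_ALL + SYNC sequence flushes the MMU's cached copy.
 */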
static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry;
	u64 cd[4];
	int ret = 0;

	if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
		return -EINVAL;

	entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
	drm_WARN_ON(&vdev->drm, !!(entry[0] & IVPU_MMU_CD_0_V) == valid);

	cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
		FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
		FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
		IVPU_MMU_CD_0_TCR_EPD1 |
		IVPU_MMU_CD_0_AA64 |
		IVPU_MMU_CD_0_R |
		IVPU_MMU_CD_0_ASET;
	cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
	cd[2] = 0;
	cd[3] = 0x0000000000007444;

	/* For global and reserved contexts generate memory fault on VPU */
	if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
		cd[0] |= IVPU_MMU_CD_0_A;

	if (valid)
		cd[0] |= IVPU_MMU_CD_0_V;

	WRITE_ONCE(entry[1], cd[1]);
	WRITE_ONCE(entry[2], cd[2]);
	WRITE_ONCE(entry[3], cd[3]);
	WRITE_ONCE(entry[0], cd[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		 valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err_invalidate;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err_invalidate;
unlock:
	mutex_unlock(&mmu->lock);
	return 0;

err_invalidate:
	WRITE_ONCE(entry[0], 0);
	mutex_unlock(&mmu->lock);
	return ret;
}

int ivpu_mmu_init(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	ivpu_dbg(vdev, MMU, "Init..\n");

	ivpu_mmu_config_check(vdev);

	ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
	if (ret)
		return ret;

	ret = ivpu_mmu_structs_alloc(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_strtab_init(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
		return ret;
	}

	ivpu_dbg(vdev, MMU, "Init done\n");

	return 0;
}

int ivpu_mmu_enable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	mutex_lock(&mmu->lock);

	mmu->on = true;

	ret = ivpu_mmu_reset(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
		goto err;
	}

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err;

	mutex_unlock(&mmu->lock);

	return 0;
err:
	mmu->on = false;
	mutex_unlock(&mmu->lock);
	return ret;
}

void ivpu_mmu_disable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	mutex_lock(&mmu->lock);
	mmu->on = false;
	mutex_unlock(&mmu->lock);
}

static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
	u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
	u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
	u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
	u64 in_addr = ((u64)event[5]) << 32 | event[4];
	u32 sid = event[1];

	ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
			     op, ivpu_mmu_event_to_str(op), ssid, sid,
			     event[2], event[3], in_addr, fetch_addr);
}

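/*
 * Return a pointer to the next unread event record, or NULL if the queue is
 * empty. The producer index is read from the *_SEC alias of the EVTQ
 * registers (at a 64K offset), which is the view of the event queue used
 * throughout this driver.
 */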
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
	u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
	u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
	if (ivpu_mmu_queue_is_empty(evtq))
		return NULL;

	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
	return evt;
}

static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(IVPU_MMU_REG_CR0);

	if (enable)
		val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
	else
		val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
	REGV_WR32(IVPU_MMU_REG_CR0, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
{
	return ivpu_mmu_evtq_set(vdev, true);
}

static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
{
	return ivpu_mmu_evtq_set(vdev, false);
}

void ivpu_mmu_discard_events(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	mutex_lock(&mmu->lock);
	/*
	 * Disable event queue (stop MMU from updating the producer)
	 * to allow synchronization of consumer and producer indexes
	 */
	ivpu_mmu_evtq_disable(vdev);

	vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
	vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);

	ivpu_mmu_evtq_enable(vdev);

	drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);

	mutex_unlock(&mmu->lock);
}

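/*
 * Clear the record-events bit (IVPU_MMU_CD_0_R) in the context descriptor
 * for @ssid so the MMU stops queuing fault events for that context, e.g.
 * once the context is already known to be faulty and about to be aborted.
 */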
int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry;
	u64 val;

	if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
		return -EINVAL;

	mutex_lock(&mmu->lock);

	entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);

	val = READ_ONCE(entry[0]);
	val &= ~IVPU_MMU_CD_0_R;
	WRITE_ONCE(entry[0], val);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

	ivpu_mmu_cmdq_write_cfgi_all(vdev);
	ivpu_mmu_cmdq_sync(vdev);

	mutex_unlock(&mmu->lock);

	return 0;
}

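/*
 * Drain the event queue: faults in the global or reserved contexts trigger
 * full device recovery, while per-context faults flag the owning file_priv
 * and defer the context abort to a workqueue.
 */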
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	u32 *event;
	u32 ssid;

	ivpu_dbg(vdev, IRQ, "MMU event queue\n");

	while ((event = ivpu_mmu_get_event(vdev))) {
		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
		    ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
			ivpu_mmu_dump_event(vdev, event);
			ivpu_pm_trigger_recovery(vdev, "MMU event");
			return;
		}

		file_priv = xa_load(&vdev->context_xa, ssid);
		if (file_priv) {
			if (!READ_ONCE(file_priv->has_mmu_faults)) {
				ivpu_mmu_dump_event(vdev, event);
				WRITE_ONCE(file_priv->has_mmu_faults, true);
			}
		}
	}

	queue_work(system_wq, &vdev->context_abort_work);
}

void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
{
	u32 *event;

	while ((event = ivpu_mmu_get_event(vdev)) != NULL)
		ivpu_mmu_dump_event(vdev, event);
}

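/*
 * GERROR and GERRORN form a toggle pair: bits that differ between the two
 * registers indicate active, unacknowledged errors. After reporting them,
 * the handler acknowledges by writing the GERROR value back to GERRORN.
 */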
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
	u32 gerror_val, gerrorn_val, active;

	ivpu_dbg(vdev, IRQ, "MMU error\n");

	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);

	active = gerror_val ^ gerrorn_val;
	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
		return;

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}

int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
	return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}

void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}