// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

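/*
 * Convert an AArch64 MAIR value into the Mali MEMATTR register layout: each
 * of the eight attribute bytes is translated independently, with write-back
 * cacheable entries mapped to the inner/outer write-back encoding and
 * everything else (write-through, non-cacheable, device) mapped to
 * non-cacheable.
 */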
static u64 mair_to_memattr(u64 mair, bool coherent)
{
	u64 memattr = 0;
	u32 i;

	for (i = 0; i < 8; i++) {
		u8 in_attr = mair >> (8 * i), out_attr;
		u8 outer = in_attr >> 4, inner = in_attr & 0xf;

		/* For caching to be enabled, the inner and outer caching
		 * policies both have to be write-back. If either of them is
		 * write-through or non-cacheable, we just choose
		 * non-cacheable. Device memory is also translated to
		 * non-cacheable.
		 */
		if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
				   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
		} else {
			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
			/* Use SH_MIDGARD_INNER mode when device isn't coherent,
			 * so SH_IS, which is used when IOMMU_CACHE is set, maps
			 * to Mali's internal-shareable mode. As per the Mali
			 * Spec, inner and outer-shareable modes aren't allowed
			 * for WB memory when coherency is disabled.
			 * Use SH_CPU_INNER mode when coherency is enabled, so
			 * that SH_IS actually maps to the standard definition of
			 * inner-shareable.
			 */
			if (!coherent)
				out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
			else
				out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
		}

		memattr |= (u64)out_attr << (8 * i);
	}

	return memattr;
}

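/*
 * Poll AS_STATUS until the address space has no command in flight. If the
 * AS_ACTIVE bit never clears, the GPU is assumed to be wedged and a device
 * reset is scheduled.
 */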
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);

	if (ret) {
		/* The GPU hung, let's trigger a reset */
		panfrost_device_schedule_reset(pfdev);
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region;
	u64 region_end = region_start + size;

	if (!size)
		return;

	/*
	 * The locked region is a naturally aligned power of 2 block encoded
	 * as its log2 minus 1.
	 * Calculate the desired start/end and look for the highest bit which
	 * differs. The smallest naturally aligned block must include this bit
	 * change, the desired region starts with this bit (and subsequent bits)
	 * zeroed and ends with the bit (and subsequent bits) set to one.
	 */
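	/*
	 * For example (assuming AS_LOCK_REGION_MIN_SIZE is no larger than
	 * 64KiB): region_start = 0x20000 and size = 0x10000 give
	 * region_start ^ (region_end - 1) = 0x20000 ^ 0x2ffff = 0xffff, so
	 * region_width = fls64(0xffff) - 1 = 15 and a 2^16 = 64KiB region
	 * starting at 0x20000 gets locked.
	 */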
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

	/*
	 * Mask off the low bits of region_start (which would be ignored by
	 * the hardware anyway)
	 */
	region_start &= GENMASK_ULL(63, region_width);

	region = region_width | region_start;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

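/*
 * Issue an MMU command on a given address space, locking the target region
 * first for everything but UNLOCK. The caller is expected to hold
 * pfdev->as_lock (see mmu_hw_do_operation() below).
 */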
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	u64 transtab = mmu->cfg.transtab;
	u64 memattr = mmu->cfg.memattr;
	u64 transcfg = mmu->cfg.transcfg;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
	mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
	mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
{
	struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;

	/* TODO: The following fields are duplicated between the MMU and Page
	 * Table config structs. Ideally, should be kept in one place.
	 */
	mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
	mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
	mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;

	return 0;
}

static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
{
	struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
	struct panfrost_device *pfdev = mmu->pfdev;

	if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
			~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
		return -EINVAL;

	mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;

	mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
					   pgtbl_cfg->coherent_walk);

	mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
			    AS_TRANSCFG_PTW_RA |
			    AS_TRANSCFG_ADRMODE_AARCH64_4K |
			    AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
	if (pgtbl_cfg->coherent_walk)
		mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;

	return 0;
}

static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
				 enum io_pgtable_fmt fmt)
{
	struct panfrost_device *pfdev = mmu->pfdev;

	switch (fmt) {
	case ARM_64_LPAE_S1:
		return mmu_cfg_init_aarch64_4k(mmu);
	case ARM_MALI_LPAE:
		return mmu_cfg_init_mali_lpae(mmu);
	default:
		/* This should never happen */
		drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
		return -EINVAL;
	}
}

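/*
 * Pick a hardware address space (AS) for @mmu and program it into the GPU.
 * If the context already owns an AS it is simply refcounted and moved to the
 * head of the LRU list; otherwise a free AS is allocated, or the least
 * recently used idle one is reclaimed from its previous owner.
 */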
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		u32 mask = BIT(as) | BIT(16 + as);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

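/*
 * Called after a GPU reset: all address-space assignments are stale, so drop
 * them and clear/unmask every MMU interrupt before jobs are resubmitted.
 */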
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
	 */
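	/*
	 * For example, addr = 0x201000 and size = 0x400000: the next 2MiB
	 * boundary is 0x1ff000 bytes away, so this returns SZ_4K with
	 * *count = 0x1ff and the caller maps 4KiB pages up to 0x400000;
	 * the following call is then 2MiB-aligned with at least 2MiB left
	 * and returns SZ_2M.
	 */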
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min(blk_offset, size) / SZ_2M;
	return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}

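/*
 * Map a DMA-mapped scatterlist into the context's address space, using 2MiB
 * block mappings where the IOVA/physical alignment allows it and 4KiB pages
 * otherwise, then flush the affected range.
 */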
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
				       GFP_KERNEL, &mapped);
			/* Don't get stuck if things have gone wrong */
			mapped = max(mapped, pgsize);
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_shmem_object *shmem = &bo->base;
	struct drm_gem_object *obj = &shmem->base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		if (bo->is_heap)
			pgcount = 1;
		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
			WARN_ON(unmapped_page != pgsize * pgcount);
		}
		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all = mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};

static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

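/*
 * Handle a translation fault on a growable ("heap") BO: find the mapping that
 * covers the faulting address, allocate and pin the backing pages for the
 * surrounding 2MiB-aligned chunk if they aren't there yet, DMA-map them and
 * insert them into the faulting address space.
 */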
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	struct drm_gem_object *obj;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	obj = &bo->base.base;

	dma_resv_lock(obj->resv, NULL);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
					  sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			ret = -ENOMEM;
			goto err_unlock;
		}
		bo->base.pages = pages;
		refcount_set(&bo->base.pages_use_count, 1);
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		/* Can happen if the last fault only partially filled this
		 * section of the pages array before failing. In that case
		 * we skip already filled pages.
		 */
		if (pages[i])
			continue;

		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto err_unlock;
		}
	}

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);

	bomapping->active = true;
	bo->heap_rss_size += SZ_2M;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	dma_resv_unlock(obj->resv);

	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_unlock:
	dma_resv_unlock(obj->resv);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

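/*
 * Create a per-file MMU context: a drm_mm allocator for GPU VAs, an
 * io-pgtable instance and the register configuration used when the context
 * is bound to a hardware AS. The AArch64 4K page-table format is only used
 * when GPU_QUIRK_FORCE_AARCH64_PGTABLE is set and the GPU advertises
 * HW_FEATURE_AARCH64_MMU; otherwise the legacy Mali LPAE format is used.
 */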
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
	struct panfrost_mmu *mmu;
	enum io_pgtable_fmt fmt;
	int ret;

	if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
		if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
			dev_err_once(pfdev->dev,
				     "AARCH64_4K page table not supported\n");
			return ERR_PTR(-EINVAL);
		}
		fmt = ARM_64_LPAE_S1;
	} else {
		fmt = ARM_MALI_LPAE;
	}

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G is enough for now, can be extended to 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K | SZ_2M,
		.ias = va_bits,
		.oas = pa_bits,
		.coherent_walk = pfdev->coherent,
		.tlb = &mmu_tlb_ops,
		.iommu_dev = pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
	if (!mmu->pgtbl_ops) {
		ret = -EINVAL;
		goto err_free_mmu;
	}

	ret = panfrost_mmu_cfg_init(mmu, fmt);
	if (ret)
		goto err_free_io_pgtable;

	kref_init(&mmu->refcount);

	return mmu;

err_free_io_pgtable:
	free_io_pgtable_ops(mmu->pgtbl_ops);

err_free_mmu:
	kfree(mmu);
	return ERR_PTR(ret);
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

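/*
 * Hard IRQ handler: if any MMU interrupt is pending, mask them all and let
 * the threaded handler do the actual fault processing.
 */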
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
		return IRQ_NONE;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

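/*
 * Threaded IRQ handler: for each faulting address space, decode the fault
 * status. Translation faults on growable heap BOs are handled by mapping in
 * more pages; any other fault is reported and the AS is disabled (killing the
 * jobs using it) until it gets re-acquired.
 */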
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	/* Enable interrupts only if we're not about to get suspended */
	if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
		spin_lock(&pfdev->as_lock);
		mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
		spin_unlock(&pfdev->as_lock);
	}

	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err;

	pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (pfdev->mmu_irq < 0)
		return pfdev->mmu_irq;

	err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}

void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

	mmu_write(pfdev, MMU_INT_MASK, 0);
	synchronize_irq(pfdev->mmu_irq);
}