// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 * it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on;
 * hiding the mmap call in a driver-private ioctl will break that. The i915
 * driver only does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
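
/*
 * Illustrative sketch (not part of the driver): how userspace typically
 * drives the legacy ioctl above. The struct and ioctl names come from
 * include/uapi/drm/i915_drm.h; the fd/handle/size plumbing around them is
 * an assumption made only for this example.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,	// GEM handle from GEM_CREATE
 *		.offset = 0,
 *		.size	= size,
 *		.flags	= I915_MMAP_WC,	// request a write-combining mapping
 *	};
 *	void *ptr = NULL;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */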

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * 5 - Support multiple partial mmaps (mmap part of a BO + unmap an offset,
 *     multiple times with different sizes and offsets).
 *
 * Restrictions:
 *
 * * snoopable objects cannot be accessed via the GTT; doing so can cause
 *   machine hangs on some architectures, corruption on others. An attempt
 *   to service a GTT page fault from a snoopable object will generate a
 *   SIGBUS.
 *
 * * the object must be able to fit into RAM (physical memory, though not
 *   limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 * * a new GTT page fault will synchronize rendering from the GPU and flush
 *   all data to system memory. Subsequent access will not be synchronized.
 *
 * * all mappings are revoked on runtime device suspend.
 *
 * * there are only 8, 16 or 32 fence registers to share between all users
 *   (older machines require a fence register for display and blitter access
 *   as well). Contention of the fence registers will cause the previous users
 *   to be unmapped and any new access will generate new page faults.
 *
 * * running out of memory while servicing a fault may generate a SIGBUS,
 *   rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 5;
}
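
/*
 * Illustrative sketch (not part of the driver): userspace can query the
 * value returned above through the GETPARAM interface. The parameter and
 * ioctl names come from include/uapi/drm/i915_drm.h; the fd handling is
 * assumed for the example only.
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("gtt mmap version %d\n", version);
 */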

static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_gtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GTT_VIEW_NORMAL;

	return view;
}

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ENOBUFS: /* temporarily out of fences? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	unsigned long obj_offset;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, obj_offset, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static void set_address_limits(struct vm_area_struct *area,
			       struct i915_vma *vma,
			       unsigned long obj_offset,
			       resource_size_t gmadr_start,
			       unsigned long *start_vaddr,
			       unsigned long *end_vaddr,
			       unsigned long *pfn)
{
	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
	long start, end; /* memory boundaries */

	/*
	 * Let's move into the ">> PAGE_SHIFT"
	 * domain to be sure not to lose bits
	 */
	vm_start = area->vm_start >> PAGE_SHIFT;
	vm_end = area->vm_end >> PAGE_SHIFT;
	vma_size = vma->size >> PAGE_SHIFT;

	/*
	 * Calculate the memory boundaries by considering the offset
	 * provided by the user during memory mapping and the offset
	 * provided for the partial mapping.
	 */
	start = vm_start;
	start -= obj_offset;
	start += vma->gtt_view.partial.offset;
	end = start + vma_size;

	start = max_t(long, start, vm_start);
	end = min_t(long, end, vm_end);

	/* Let's move back into the "<< PAGE_SHIFT" domain */
	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
	*end_vaddr = (unsigned long)end << PAGE_SHIFT;

	*pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
	*pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
	*pfn += obj_offset - vma->gtt_view.partial.offset;
}
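
/*
 * Worked example (illustration only, assuming 4 KiB pages): a 16-page
 * object is mmapped in full (obj_offset == 0) at vm_start == 0x100000,
 * so vm_end == 0x110000, and the bound partial view covers object pages
 * 8..15 (partial.offset == 8, vma->size == 8 pages). Then start == 0x108
 * and end == 0x110 in page units, giving *start_vaddr == 0x108000 and
 * *end_vaddr == 0x110000, i.e. only the half of the user range backed by
 * this view gets remapped. The pfn works out to the first page of the
 * view in the aperture: base + (0x108000 - 0x100000)/4096 + 0 - 8 == base.
 */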

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	unsigned long obj_offset;
	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned long pfn;
	int srcu;
	int ret;

	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
	page_offset += obj_offset;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	/*
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by the set_pat extension, coherency is managed by userspace.
	 * Make sure we don't fail handling the vm fault by calling
	 * i915_gem_object_has_cache_level(), which always returns true for
	 * such objects. Otherwise this helper function would fall back to
	 * checking whether the object is un-cached.
	 */
	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
	      HAS_LLC(i915))) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/*
	 * Work out the virtual address start and end and the PFN
	 * (Page Frame Number) needed for the remap below.
	 */
	set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
			   &start, &end, &pfn);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);

	/*
	 * We have exclusive access here via runtime suspend. All other callers
	 * must first grab the rpm wakeref.
	 */
	GEM_BUG_ON(!obj->userfault_count);
	list_del(&obj->userfault_link);
	obj->userfault_count = 0;
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow_once(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}
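
/*
 * Illustrative sketch (not part of the driver): the modern userspace flow
 * recommended in the i915_gem_mmap_ioctl() kerneldoc, using the mmap-offset
 * ioctl above followed by a plain mmap() on the DRM fd. The struct and ioctl
 * names come from include/uapi/drm/i915_drm.h; the fd/handle/size variables
 * are assumed for the example only, and the flag choice depends on the
 * platform (e.g. I915_MMAP_OFFSET_FIXED on discrete).
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags	= I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */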

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	file = get_file_active(&i915->gem.mmap_singleton);
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

static int
i915_gem_object_mmap(struct drm_i915_gem_object *obj,
		     struct i915_mmap_offset *mmo,
		     struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct file *anon;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = obj->base.vma_node.driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
					    vma->vm_pgoff,
					    vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	return i915_gem_object_mmap(obj, mmo, vma);
}

int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct i915_mmap_offset *mmo = NULL;
	enum i915_mmap_type mmap_type;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* handle ttm object */
	if (obj->ops->mmap_ops) {
		/*
		 * The ttm fault handler, ttm_bo_vm_fault_reserved(), uses the
		 * fake offset to calculate the page offset, so set that up.
		 */
		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
	} else {
		/* handle stolen and smem objects */
		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);

		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
	}

	/*
	 * When we install vm_ops for mmap we are too late for the
	 * vm_ops->open() which increases the ref_count of this obj; it then
	 * gets decreased by vm_ops->close(). To balance this, take an extra
	 * obj reference here.
	 */
	obj = i915_gem_object_get(obj);
	return i915_gem_object_mmap(obj, mmo, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif