// SPDX-License-Identifier: MIT
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level)
{
	if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL))
		return 0;

	return INTEL_INFO(i915)->cachelevel_to_pat[level];
}

bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl)
{
	/*
	 * In case the pat_index is set by user space, this kernel mode
	 * driver should leave the coherency to be managed by user space,
	 * simply return true here.
	 */
	if (obj->pat_set_by_user)
		return true;

	/*
	 * Otherwise the pat_index should have been converted from cache_level
	 * so that the following comparison is valid.
	 */
	return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
}

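/*
 * Allocate a zeroed i915 GEM object from the dedicated slab cache and hook
 * up the common i915 GEM object vfuncs. Returns NULL on allocation failure.
 */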
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

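/*
 * Initialise the i915-specific part of a GEM object: locks, lists and the
 * page-lookup radix trees. The drm-level initialisation of @obj->base is
 * done separately by the backend (see __i915_gem_object_fini()).
 */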
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

#ifdef CONFIG_PROC_FS
	INIT_LIST_HEAD(&obj->client_link);
#endif

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to cleanup
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->pat_index = i915_gem_get_pat_index(i915, cache_level);

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}

/**
 * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode
 * @obj: #drm_i915_gem_object
 * @pat_index: PAT index
 *
 * This is a clone of i915_gem_object_set_cache_coherency taking pat index
 * instead of cache_level as its second argument.
 */
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->pat_index == pat_index)
		return;

	obj->pat_index = pat_index;

	if (pat_index != i915_gem_get_pat_index(i915, I915_CACHE_NONE))
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This is purely from a security perspective, so we simply don't care
	 * about non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * Always flush cache for UMD objects at creation time.
	 */
	if (obj->pat_set_by_user)
		return true;

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 */
	return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
}

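/*
 * GEM handle close callback: drop this file's LUT entries and mmap grants
 * for the object, then close the VMAs that were instantiated via those
 * handles.
 */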
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

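/*
 * Final, RCU-deferred stage of freeing an object; runs once concurrent
 * RCU-protected lookups can no longer observe the object.
 */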
void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We need to keep this alive for RCU read access from fdinfo. */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

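/* Tear down any GTT and offset-based mmap state still attached to the object. */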
static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be not used. */

	if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);

	/*
	 * dma_buf_unmap_attachment() requires reservation to be
	 * locked. The imported GEM shouldn't share reservation lock
	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
	 * dma-buf, so it's safe to take the lock.
	 */
	if (obj->base.import_attach)
		i915_gem_object_lock(obj, NULL);

	__i915_gem_object_put_pages(obj);

	if (obj->base.import_attach)
		i915_gem_object_unlock(obj);

	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

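/*
 * Process a batch of objects queued for freeing, either via the backend's
 * delayed_free hook or by releasing pages and object state directly here.
 */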
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

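/* Drain the deferred-free list now instead of waiting for the worker. */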
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

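/* Worker for deferred freeing, scheduled from i915_gem_free_object(). */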
static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	i915_drm_client_remove_object(obj);

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = i915_gem_object_get_frontbuffer(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = i915_gem_object_get_frontbuffer(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	void *src_ptr;

	src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
	          + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_local(src_ptr);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
	void __iomem *src_map;
	void __iomem *src_ptr;

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_iomem(obj));

	if (IS_DGFX(to_i915(obj->base.dev)))
		return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);

	return true;
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
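 *
 * A minimal usage sketch (illustrative only; the locking/pinning strategy is
 * an assumption about the caller, not something this helper enforces):
 *
 *	u32 val;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	err = i915_gem_object_read_from_page(obj, 0, &val, sizeof(val));
 *	i915_gem_object_unpin_pages(obj);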
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}

/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
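 *
 * A sketch of the expected calling pattern (illustrative only; the ww
 * relocking loop, the target region and the trailing wait are assumptions
 * about the caller rather than requirements enforced here):
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_wait_migration(obj, 0);
 *	}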
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
}

/**
 * __i915_gem_object_migrate - Migrate an object to the desired region id, with
 * control of the extra flags
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 * @flags: The object flags. Normally just obj->flags.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr, flags);
}

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
		return true;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for the objects with smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}

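/*
 * drm_gem_object_funcs .vmap/.vunmap implementation: give generic users
 * (e.g. dma-buf importers) a WB kernel mapping of the object's pages.
 */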
static int i915_gem_vmap_object(struct drm_gem_object *gem_obj,
				struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void i915_gem_vunmap_object(struct drm_gem_object *gem_obj,
				   struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
	.vmap = i915_gem_vmap_object,
	.vunmap = i915_gem_vunmap_object,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence)
{
	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      fence);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	long ret;

	assert_object_held(obj);

	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}

/*
 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
 * in an unknown_state. This means that userspace must NEVER be allowed to touch
 * the pages, with either the GPU or CPU.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The below barrier pairs with the dma_fence_signal() in
	 * __memcpy_work(). We should only sample the unknown_state after all
	 * the kernel fences have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif