/linux/drivers/gpu/drm/i915/gem/i915_gem_clflush.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

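/*
 * Tracks an asynchronous cacheline flush: the dma_fence_work provides the
 * fence machinery, while @obj holds a reference to the object being flushed.
 */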
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

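/*
 * Flush every cacheline backing the object's pages, then signal the
 * frontbuffer tracking that the CPU writes have been pushed out.
 */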
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

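/* dma_fence_work callback: run the flush when the fence work executes. */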
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

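/*
 * dma_fence_work release callback: drop the page pin and object reference
 * taken by clflush_work_create().
 */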
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

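/* Dispatch table hooking the flush and teardown into the fence work. */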
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

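/*
 * Prepare an asynchronous flush for @obj: the pages are pinned and an extra
 * object reference taken, both of which are dropped in clflush_release()
 * once the fence work has run. Returns NULL on allocation or page-acquire
 * failure, leaving the caller to flush synchronously instead.
 */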
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

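/*
 * Flush the CPU caches for the object's backing pages if they may be dirty.
 * The flush is queued as fence work behind the object's outstanding fences
 * unless I915_CLFLUSH_SYNC is given; I915_CLFLUSH_FORCE overrides the
 * cache-coherency check. Returns false if no flush was required, true
 * otherwise.
 */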
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not in need of clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we are
		 * holding a pin on the pages as per the flush worker) to reach
		 * this point, which must mean we have already done the required
		 * flush-on-acquire, hence resetting cache_dirty here should be
		 * safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}