xref: /linux/drivers/gpu/drm/msm/msm_gem_shrinker.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Eviction is enabled by default, but can be disabled via this module
 * parameter while it gets more testing on the different iommu combinations
 * that can be paired with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

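/* Swapping pages out only makes sense if eviction is enabled and there is
 * actually swap space available to receive the evicted pages:
 */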
static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

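/* The shrinker may only block (for example to wait for in-flight GPU work)
 * when called from a context that permits direct reclaim:
 */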
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}

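/* Report how much is potentially reclaimable: the purgeable (dontneed) LRU,
 * plus the evictable (willneed) LRU when swap is available:
 */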
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

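/* Call @fn on @obj with the resv of every VM that the object is mapped in
 * held, skipping VMs that share the object's own resv.  Returns true if all
 * of the locks could be acquired and @fn was called:
 */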
static bool
with_vm_locks(struct ww_acquire_ctx *ticket,
	      void (*fn)(struct drm_gem_object *obj),
	      struct drm_gem_object *obj)
{
	/*
	 * Track the last locked entry for unwinding the locks in both the
	 * error and success paths
	 */
	struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
	int ret = 0;

	drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
		struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);

		if (resv == obj->resv)
			continue;

		ret = dma_resv_lock(resv, ticket);

		/*
		 * Since we already skip the case when the VM and obj
		 * share a resv (ie. _NO_SHARE objs), we don't expect
		 * to hit a double-locking scenario... which the lock
		 * unwinding cannot really cope with.
		 */
		WARN_ON(ret == -EALREADY);

		/*
		 * Don't bother with the slow-lock / backoff / retry sequence;
		 * if we can't get the lock, just give up and move on to the
		 * next object.
		 */
		if (ret)
			goto out_unlock;

		/*
		 * Hold a ref to prevent the vm_bo from being freed and
		 * removed from the obj's gpuva list, as that would result
		 * in missing the unlock below
		 */
		drm_gpuvm_bo_get(vm_bo);

		last_locked = vm_bo;
	}

	fn(obj);

out_unlock:
	if (last_locked) {
		drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
			struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);

			if (resv == obj->resv)
				continue;

			dma_resv_unlock(resv);

			/* Drop the ref taken while locking: */
			drm_gpuvm_bo_put(vm_bo);

			if (last_locked == vm_bo)
				break;
		}
	}

	return ret == 0;
}

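/* Purge the backing pages of an object that userspace has marked as
 * purgeable (MADV_DONTNEED), provided it is not still active on the GPU:
 */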
static bool
purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	return with_vm_locks(ticket, msm_gem_purge, obj);
}

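/* Evict an object's backing pages (so that they can be swapped out),
 * provided the object is evictable and not still active on the GPU:
 */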
static bool
evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	return with_vm_locks(ticket, msm_gem_evict, obj);
}

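/* Briefly (10 jiffies) wait for all fences on the object, including
 * bookkeeping fences, to signal.  Returns true if the object is now idle:
 */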
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;

	return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}

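/* The 'active' variants below first wait briefly for an active object to
 * become idle, and are only used when the shrinker is allowed to block:
 */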
static bool
active_purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj, ticket);
}

static bool
active_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj, ticket);
}

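/* Scan the LRUs in stages, from the cheapest reclaim (purging idle dontneed
 * objects) to the most expensive (waiting for active willneed objects and
 * evicting them to swap):
 */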
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct ww_acquire_ctx ticket;
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					 &stages[i].remaining,
					 stages[i].shrink,
					 &ticket);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

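	/*
	 * Tell the core to stop further scanning if we made no progress,
	 * or if nothing had to be skipped (remaining == 0); otherwise
	 * report how much was freed:
	 */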
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
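/* Debugfs-only entry point for manually driving a shrinker scan: */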
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret = SHRINK_STOP;

	fs_reclaim_acquire(GFP_KERNEL);
	if (priv->shrinker)
		ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* Since we don't know any better, let's bail after unmapping a few objects;
 * if necessary the shrinker will be invoked again.  Seems better than
 * unmapping *everything*
 */
static const int vmap_shrink_limit = 15;

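/* Tear down the kernel-side vmap'ing of an object, if it has one that can
 * safely be dropped:
 */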
static bool
vmap_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

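/* Called via the vmap purge notifier chain; walk all of the LRUs (including
 * pinned objects) dropping vmap'ings until the vmap_shrink_limit is reached:
 */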
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;
	unsigned long remaining = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     &remaining,
					     vmap_shrink,
					     NULL);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 *
 * Return: 0 on success, or -ENOMEM if the shrinker could not be allocated.
 */
int msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
	if (!priv->shrinker)
		return -ENOMEM;

	priv->shrinker->count_objects = msm_gem_shrinker_count;
	priv->shrinker->scan_objects = msm_gem_shrinker_scan;
	priv->shrinker->private_data = priv;

	shrinker_register(priv->shrinker);

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));

	return 0;
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		shrinker_free(priv->shrinker);
	}
}