1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3 * Copyright © 2025 Intel Corporation
4 */
5
6 #include <linux/slab.h>
7
8 #include <drm/drm_drv.h>
9 #include <drm/drm_managed.h>
10 #include <drm/drm_pagemap.h>
11 #include <drm/drm_pagemap_util.h>
12 #include <drm/drm_print.h>
13
14 /**
15 * struct drm_pagemap_cache - Lookup structure for pagemaps
16 *
17 * Structure to keep track of active (refcount > 1) and inactive
18 * (refcount == 0) pagemaps. Inactive pagemaps can be made active
19 * again by waiting for the @queued completion (indicating that the
20 * pagemap has been put on the @shrinker's list of shrinkable
21 * pagemaps, and then successfully removing it from @shrinker's
22 * list. The latter may fail if the shrinker is already in the
23 * process of freeing the pagemap. A struct drm_pagemap_cache can
24 * hold a single struct drm_pagemap.
25 */
26 struct drm_pagemap_cache {
27 /** @lookup_mutex: Mutex making the lookup process atomic */
28 struct mutex lookup_mutex;
29 /** @lock: Lock protecting the @dpagemap pointer */
30 spinlock_t lock;
31 /** @shrinker: Pointer to the shrinker used for this cache. Immutable. */
32 struct drm_pagemap_shrinker *shrinker;
33 /** @dpagemap: Non-refcounted pointer to the drm_pagemap */
34 struct drm_pagemap *dpagemap;
35 /**
36 * @queued: Signals when an inactive drm_pagemap has been put on
37 * @shrinker's list.
38 */
39 struct completion queued;
40 };
41
42 /**
43 * struct drm_pagemap_shrinker - Shrinker to remove unused pagemaps
44 */
45 struct drm_pagemap_shrinker {
46 /** @drm: Pointer to the drm device. */
47 struct drm_device *drm;
48 /** @lock: Spinlock to protect the @dpagemaps list. */
49 spinlock_t lock;
50 /** @dpagemaps: List of unused dpagemaps. */
51 struct list_head dpagemaps;
52 /** @num_dpagemaps: Number of unused dpagemaps in @dpagemaps. */
53 atomic_t num_dpagemaps;
54 /** @shrink: Pointer to the struct shrinker. */
55 struct shrinker *shrink;
56 };
57
58 static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
59
drm_pagemap_cache_fini(void * arg)60 static void drm_pagemap_cache_fini(void *arg)
61 {
62 struct drm_pagemap_cache *cache = arg;
63 struct drm_pagemap *dpagemap;
64
65 drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
66 spin_lock(&cache->lock);
67 dpagemap = cache->dpagemap;
68 cache->dpagemap = NULL;
69 if (dpagemap && !drm_pagemap_shrinker_cancel(dpagemap))
70 dpagemap = NULL;
71 spin_unlock(&cache->lock);
72
73 if (dpagemap)
74 drm_pagemap_destroy(dpagemap, false);
75
76 mutex_destroy(&cache->lookup_mutex);
77 kfree(cache);
78 }
79
80 /**
81 * drm_pagemap_cache_create_devm() - Create a drm_pagemap_cache
82 * @shrinker: Pointer to a struct drm_pagemap_shrinker.
83 *
84 * Create a device-managed drm_pagemap cache. The cache is automatically
85 * destroyed on struct device removal, at which point any *inactive*
86 * drm_pagemap's are destroyed.
87 *
88 * Return: Pointer to a struct drm_pagemap_cache on success. Error pointer
89 * on failure.
90 */
drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker * shrinker)91 struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
92 {
93 struct drm_pagemap_cache *cache = kzalloc_obj(*cache);
94 int err;
95
96 if (!cache)
97 return ERR_PTR(-ENOMEM);
98
99 mutex_init(&cache->lookup_mutex);
100 spin_lock_init(&cache->lock);
101 cache->shrinker = shrinker;
102 init_completion(&cache->queued);
103 err = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
104 if (err)
105 return ERR_PTR(err);
106
107 return cache;
108 }
109 EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
110
111 /**
112 * DOC: Cache lookup
113 *
114 * Cache lookup should be done under a locked mutex, so that a
115 * failed drm_pagemap_get_from_cache() and a following
116 * drm_pagemap_cache_setpagemap() are carried out as an atomic
117 * operation WRT other lookups. Otherwise, racing lookups may
118 * unnecessarily concurrently create pagemaps to fulfill a
119 * failed lookup. The API provides two functions to perform this lock,
120 * drm_pagemap_lock_lookup() and drm_pagemap_unlock_lookup() and they
121 * should be used in the following way:
122 *
123 * .. code-block:: c
124 *
125 * drm_pagemap_lock_lookup(cache);
126 * dpagemap = drm_pagemap_get_from_cache(cache);
127 * if (dpagemap)
128 * goto out_unlock;
129 *
130 * dpagemap = driver_create_new_dpagemap();
131 * if (!IS_ERR(dpagemap))
132 * drm_pagemap_cache_set_pagemap(cache, dpagemap);
133 *
134 * out_unlock:
135 * drm_pagemap_unlock_lookup(cache);
136 */
137
138 /**
139 * drm_pagemap_cache_lock_lookup() - Lock a drm_pagemap_cache for lookup.
140 * @cache: The drm_pagemap_cache to lock.
141 *
142 * Return: %-EINTR if interrupted while blocking. %0 otherwise.
143 */
drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache * cache)144 int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
145 {
146 return mutex_lock_interruptible(&cache->lookup_mutex);
147 }
148 EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
149
150 /**
151 * drm_pagemap_cache_unlock_lookup() - Unlock a drm_pagemap_cache after lookup.
152 * @cache: The drm_pagemap_cache to unlock.
153 */
drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache * cache)154 void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
155 {
156 mutex_unlock(&cache->lookup_mutex);
157 }
158 EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
159
160 /**
161 * drm_pagemap_get_from_cache() - Lookup of drm_pagemaps.
162 * @cache: The cache used for lookup.
163 *
164 * If an active pagemap is present in the cache, it is immediately returned.
165 * If an inactive pagemap is present, it's removed from the shrinker list and
166 * an attempt is made to make it active.
167 * If no pagemap present or the attempt to make it active failed, %NULL is returned
168 * to indicate to the caller to create a new drm_pagemap and insert it into
169 * the cache.
170 *
171 * Return: A reference-counted pointer to a drm_pagemap if successful. An error
172 * pointer if an error occurred, or %NULL if no drm_pagemap was found and
173 * the caller should insert a new one.
174 */
drm_pagemap_get_from_cache(struct drm_pagemap_cache * cache)175 struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
176 {
177 struct drm_pagemap *dpagemap;
178 int err;
179
180 lockdep_assert_held(&cache->lookup_mutex);
181 retry:
182 spin_lock(&cache->lock);
183 dpagemap = cache->dpagemap;
184 if (drm_pagemap_get_unless_zero(dpagemap)) {
185 spin_unlock(&cache->lock);
186 return dpagemap;
187 }
188
189 if (!dpagemap) {
190 spin_unlock(&cache->lock);
191 return NULL;
192 }
193
194 if (!try_wait_for_completion(&cache->queued)) {
195 spin_unlock(&cache->lock);
196 err = wait_for_completion_interruptible(&cache->queued);
197 if (err)
198 return ERR_PTR(err);
199 goto retry;
200 }
201
202 if (drm_pagemap_shrinker_cancel(dpagemap)) {
203 cache->dpagemap = NULL;
204 spin_unlock(&cache->lock);
205 err = drm_pagemap_reinit(dpagemap);
206 if (err) {
207 drm_pagemap_destroy(dpagemap, false);
208 return ERR_PTR(err);
209 }
210 drm_pagemap_cache_set_pagemap(cache, dpagemap);
211 } else {
212 cache->dpagemap = NULL;
213 spin_unlock(&cache->lock);
214 dpagemap = NULL;
215 }
216
217 return dpagemap;
218 }
219 EXPORT_SYMBOL(drm_pagemap_get_from_cache);
220
221 /**
222 * drm_pagemap_cache_set_pagemap() - Assign a drm_pagemap to a drm_pagemap_cache
223 * @cache: The cache to assign the drm_pagemap to.
224 * @dpagemap: The drm_pagemap to assign.
225 *
226 * The function must be called to populate a drm_pagemap_cache only
227 * after a call to drm_pagemap_get_from_cache() returns NULL.
228 */
drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache * cache,struct drm_pagemap * dpagemap)229 void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
230 {
231 struct drm_device *drm = dpagemap->drm;
232
233 lockdep_assert_held(&cache->lookup_mutex);
234 spin_lock(&cache->lock);
235 dpagemap->cache = cache;
236 swap(cache->dpagemap, dpagemap);
237 reinit_completion(&cache->queued);
238 spin_unlock(&cache->lock);
239 drm_WARN_ON(drm, !!dpagemap);
240 }
241 EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
242
243 /**
244 * drm_pagemap_get_from_cache_if_active() - Quick lookup of active drm_pagemaps
245 * @cache: The cache to lookup from.
246 *
247 * Function that should be used to lookup a drm_pagemap that is already active.
248 * (refcount > 0).
249 *
250 * Return: A pointer to the cache's drm_pagemap if it's active; %NULL otherwise.
251 */
drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache * cache)252 struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
253 {
254 struct drm_pagemap *dpagemap;
255
256 spin_lock(&cache->lock);
257 dpagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
258 spin_unlock(&cache->lock);
259
260 return dpagemap;
261 }
262 EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
263
drm_pagemap_shrinker_cancel(struct drm_pagemap * dpagemap)264 static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
265 {
266 struct drm_pagemap_cache *cache = dpagemap->cache;
267 struct drm_pagemap_shrinker *shrinker = cache->shrinker;
268
269 spin_lock(&shrinker->lock);
270 if (list_empty(&dpagemap->shrink_link)) {
271 spin_unlock(&shrinker->lock);
272 return false;
273 }
274
275 list_del_init(&dpagemap->shrink_link);
276 atomic_dec(&shrinker->num_dpagemaps);
277 spin_unlock(&shrinker->lock);
278 return true;
279 }
280
281 #ifdef CONFIG_PROVE_LOCKING
282 /**
283 * drm_pagemap_shrinker_might_lock() - lockdep test for drm_pagemap_shrinker_add()
284 * @dpagemap: The drm pagemap.
285 *
286 * The drm_pagemap_shrinker_add() function performs some locking.
287 * This function can be called in code-paths that might
288 * call drm_pagemap_shrinker_add() to detect any lockdep problems early.
289 */
drm_pagemap_shrinker_might_lock(struct drm_pagemap * dpagemap)290 void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
291 {
292 int idx;
293
294 if (drm_dev_enter(dpagemap->drm, &idx)) {
295 struct drm_pagemap_cache *cache = dpagemap->cache;
296
297 if (cache)
298 might_lock(&cache->shrinker->lock);
299
300 drm_dev_exit(idx);
301 }
302 }
303 #endif
304
305 /**
306 * drm_pagemap_shrinker_add() - Add a drm_pagemap to the shrinker list or destroy
307 * @dpagemap: The drm_pagemap.
308 *
309 * If @dpagemap is associated with a &struct drm_pagemap_cache AND the
310 * struct device backing the drm device is still alive, add @dpagemap to
311 * the &struct drm_pagemap_shrinker list of shrinkable drm_pagemaps.
312 *
313 * Otherwise destroy the pagemap directly using drm_pagemap_destroy().
314 *
315 * This is an internal function which is not intended to be exposed to drivers.
316 */
drm_pagemap_shrinker_add(struct drm_pagemap * dpagemap)317 void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
318 {
319 struct drm_pagemap_cache *cache;
320 struct drm_pagemap_shrinker *shrinker;
321 int idx;
322
323 /*
324 * The pagemap cache and shrinker are disabled at
325 * pci device remove time. After that, dpagemaps
326 * are freed directly.
327 */
328 if (!drm_dev_enter(dpagemap->drm, &idx))
329 goto out_no_cache;
330
331 cache = dpagemap->cache;
332 if (!cache) {
333 drm_dev_exit(idx);
334 goto out_no_cache;
335 }
336
337 shrinker = cache->shrinker;
338 spin_lock(&shrinker->lock);
339 list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
340 atomic_inc(&shrinker->num_dpagemaps);
341 spin_unlock(&shrinker->lock);
342 complete_all(&cache->queued);
343 drm_dev_exit(idx);
344 return;
345
346 out_no_cache:
347 drm_pagemap_destroy(dpagemap, true);
348 }
349
350 static unsigned long
drm_pagemap_shrinker_count(struct shrinker * shrink,struct shrink_control * sc)351 drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
352 {
353 struct drm_pagemap_shrinker *shrinker = shrink->private_data;
354 unsigned long count = atomic_read(&shrinker->num_dpagemaps);
355
356 return count ? : SHRINK_EMPTY;
357 }
358
359 static unsigned long
drm_pagemap_shrinker_scan(struct shrinker * shrink,struct shrink_control * sc)360 drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
361 {
362 struct drm_pagemap_shrinker *shrinker = shrink->private_data;
363 struct drm_pagemap *dpagemap;
364 struct drm_pagemap_cache *cache;
365 unsigned long nr_freed = 0;
366
367 sc->nr_scanned = 0;
368 spin_lock(&shrinker->lock);
369 do {
370 dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
371 shrink_link);
372 if (!dpagemap)
373 break;
374
375 atomic_dec(&shrinker->num_dpagemaps);
376 list_del_init(&dpagemap->shrink_link);
377 spin_unlock(&shrinker->lock);
378
379 sc->nr_scanned++;
380 nr_freed++;
381
382 cache = dpagemap->cache;
383 spin_lock(&cache->lock);
384 cache->dpagemap = NULL;
385 spin_unlock(&cache->lock);
386
387 drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
388 drm_pagemap_destroy(dpagemap, true);
389 spin_lock(&shrinker->lock);
390 } while (sc->nr_scanned < sc->nr_to_scan);
391 spin_unlock(&shrinker->lock);
392
393 return sc->nr_scanned ? nr_freed : SHRINK_STOP;
394 }
395
drm_pagemap_shrinker_fini(void * arg)396 static void drm_pagemap_shrinker_fini(void *arg)
397 {
398 struct drm_pagemap_shrinker *shrinker = arg;
399
400 drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
401 drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
402 shrinker_free(shrinker->shrink);
403 kfree(shrinker);
404 }
405
406 /**
407 * drm_pagemap_shrinker_create_devm() - Create and register a pagemap shrinker
408 * @drm: The drm device
409 *
410 * Create and register a pagemap shrinker that shrinks unused pagemaps
411 * and thereby reduces memory footprint.
412 * The shrinker is drm_device managed and unregisters itself when
413 * the drm device is removed.
414 *
415 * Return: %0 on success, negative error code on failure.
416 */
drm_pagemap_shrinker_create_devm(struct drm_device * drm)417 struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
418 {
419 struct drm_pagemap_shrinker *shrinker;
420 struct shrinker *shrink;
421 int err;
422
423 shrinker = kzalloc_obj(*shrinker);
424 if (!shrinker)
425 return ERR_PTR(-ENOMEM);
426
427 shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
428 if (!shrink) {
429 kfree(shrinker);
430 return ERR_PTR(-ENOMEM);
431 }
432
433 spin_lock_init(&shrinker->lock);
434 INIT_LIST_HEAD(&shrinker->dpagemaps);
435 shrinker->drm = drm;
436 shrinker->shrink = shrink;
437 shrink->count_objects = drm_pagemap_shrinker_count;
438 shrink->scan_objects = drm_pagemap_shrinker_scan;
439 shrink->private_data = shrinker;
440 shrinker_register(shrink);
441
442 err = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
443 if (err)
444 return ERR_PTR(err);
445
446 return shrinker;
447 }
448 EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
449
450 /**
451 * struct drm_pagemap_owner - Device interconnect group
452 * @kref: Reference count.
453 *
454 * A struct drm_pagemap_owner identifies a device interconnect group.
455 */
456 struct drm_pagemap_owner {
457 struct kref kref;
458 };
459
drm_pagemap_owner_release(struct kref * kref)460 static void drm_pagemap_owner_release(struct kref *kref)
461 {
462 kfree(container_of(kref, struct drm_pagemap_owner, kref));
463 }
464
465 /**
466 * drm_pagemap_release_owner() - Stop participating in an interconnect group
467 * @peer: Pointer to the struct drm_pagemap_peer used when joining the group
468 *
469 * Stop participating in an interconnect group. This function is typically
470 * called when a pagemap is removed to indicate that it doesn't need to
471 * be taken into account.
472 */
drm_pagemap_release_owner(struct drm_pagemap_peer * peer)473 void drm_pagemap_release_owner(struct drm_pagemap_peer *peer)
474 {
475 struct drm_pagemap_owner_list *owner_list = peer->list;
476
477 if (!owner_list)
478 return;
479
480 mutex_lock(&owner_list->lock);
481 list_del(&peer->link);
482 kref_put(&peer->owner->kref, drm_pagemap_owner_release);
483 peer->owner = NULL;
484 mutex_unlock(&owner_list->lock);
485 }
486 EXPORT_SYMBOL(drm_pagemap_release_owner);
487
488 /**
489 * typedef interconnect_fn - Callback function to identify fast interconnects
490 * @peer1: First endpoint.
491 * @peer2: Second endpont.
492 *
493 * The function returns %true iff @peer1 and @peer2 have a fast interconnect.
494 * Note that this is symmetrical. The function has no notion of client and provider,
495 * which may not be sufficient in some cases. However, since the callback is intended
496 * to guide in providing common pagemap owners, the notion of a common owner to
497 * indicate fast interconnects would then have to change as well.
498 *
499 * Return: %true iff @peer1 and @peer2 have a fast interconnect. Otherwise @false.
500 */
501 typedef bool (*interconnect_fn)(struct drm_pagemap_peer *peer1, struct drm_pagemap_peer *peer2);
502
503 /**
504 * drm_pagemap_acquire_owner() - Join an interconnect group
505 * @peer: A struct drm_pagemap_peer keeping track of the device interconnect
506 * @owner_list: Pointer to the owner_list, keeping track of all interconnects
507 * @has_interconnect: Callback function to determine whether two peers have a
508 * fast local interconnect.
509 *
510 * Repeatedly calls @has_interconnect for @peer and other peers on @owner_list to
511 * determine a set of peers for which @peer has a fast interconnect. That set will
512 * have common &struct drm_pagemap_owner, and upon successful return, @peer::owner
513 * will point to that struct, holding a reference, and @peer will be registered in
514 * @owner_list. If @peer doesn't have any fast interconnects to other @peers, a
515 * new unique &struct drm_pagemap_owner will be allocated for it, and that
516 * may be shared with other peers that, at a later point, are determined to have
517 * a fast interconnect with @peer.
518 *
519 * When @peer no longer participates in an interconnect group,
520 * drm_pagemap_release_owner() should be called to drop the reference on the
521 * struct drm_pagemap_owner.
522 *
523 * Return: %0 on success, negative error code on failure.
524 */
drm_pagemap_acquire_owner(struct drm_pagemap_peer * peer,struct drm_pagemap_owner_list * owner_list,interconnect_fn has_interconnect)525 int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
526 struct drm_pagemap_owner_list *owner_list,
527 interconnect_fn has_interconnect)
528 {
529 struct drm_pagemap_peer *cur_peer;
530 struct drm_pagemap_owner *owner = NULL;
531 bool interconnect = false;
532
533 mutex_lock(&owner_list->lock);
534 might_alloc(GFP_KERNEL);
535 list_for_each_entry(cur_peer, &owner_list->peers, link) {
536 if (cur_peer->owner != owner) {
537 if (owner && interconnect)
538 break;
539 owner = cur_peer->owner;
540 interconnect = true;
541 }
542 if (interconnect && !has_interconnect(peer, cur_peer))
543 interconnect = false;
544 }
545
546 if (!interconnect) {
547 owner = kmalloc_obj(*owner);
548 if (!owner) {
549 mutex_unlock(&owner_list->lock);
550 return -ENOMEM;
551 }
552 kref_init(&owner->kref);
553 list_add_tail(&peer->link, &owner_list->peers);
554 } else {
555 kref_get(&owner->kref);
556 list_add_tail(&peer->link, &cur_peer->link);
557 }
558 peer->owner = owner;
559 peer->list = owner_list;
560 mutex_unlock(&owner_list->lock);
561
562 return 0;
563 }
564 EXPORT_SYMBOL(drm_pagemap_acquire_owner);
565