// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/shrinker.h>

#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_shrinker.h"

/**
 * struct xe_shrinker - per-device shrinker
 * @xe: Back pointer to the device.
 * @lock: Lock protecting accounting.
 * @shrinkable_pages: Number of pages that are currently shrinkable.
 * @purgeable_pages: Number of pages that are currently purgeable.
 * @shrink: Pointer to the mm shrinker.
 * @pm_worker: Worker to wake up the device if required.
 */
struct xe_shrinker {
	struct xe_device *xe;
	rwlock_t lock;
	long shrinkable_pages;
	long purgeable_pages;
	struct shrinker *shrink;
	struct work_struct pm_worker;
};

static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
{
	return shrink->private_data;
}

/**
 * xe_shrinker_mod_pages() - Modify shrinker page accounting
 * @shrinker: Pointer to the struct xe_shrinker.
 * @shrinkable: Shrinkable pages delta. May be negative.
 * @purgeable: Purgeable pages delta. May be negative.
 *
 * Modifies the shrinkable and purgeable pages accounting.
 */
void
xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
{
	write_lock(&shrinker->lock);
	shrinker->shrinkable_pages += shrinkable;
	shrinker->purgeable_pages += purgeable;
	write_unlock(&shrinker->lock);
}
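
/*
 * Illustrative example (not taken from this file): a buffer-object backend
 * that makes num_pages of TT pages shrinkable would account them with
 *
 *	xe_shrinker_mod_pages(shrinker, num_pages, 0);
 *
 * and move them over to the purgeable count once the object's content may
 * simply be discarded instead of backed up:
 *
 *	xe_shrinker_mod_pages(shrinker, -num_pages, num_pages);
 */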
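/*
 * Walk the LRUs of the TT-backed resource managers (SYSTEM and TT) and
 * shrink suitable buffer objects until *@scanned reaches @to_scan or the
 * LRUs are exhausted. Returns the number of pages freed, or a negative
 * error code propagated from xe_bo_shrink().
 */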
static s64 xe_shrinker_walk(struct xe_device *xe,
			    struct ttm_operation_ctx *ctx,
			    const struct xe_bo_shrink_flags flags,
			    unsigned long to_scan, unsigned long *scanned)
{
	unsigned int mem_type;
	s64 freed = 0, lret;

	for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
		struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
		struct ttm_bo_lru_cursor curs;
		struct ttm_buffer_object *ttm_bo;

		if (!man || !man->use_tt)
			continue;

		ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
			if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
				continue;

			lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
			if (lret < 0)
				return lret;

			freed += lret;
			if (*scanned >= to_scan)
				break;
		}
	}

	return freed;
}

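/*
 * Shrinker "count" callback. Report the number of shrinkable pages, capped
 * by the space available in the backup backend, plus the purgeable pages.
 * Without __GFP_FS in the gfp mask, backing up isn't possible, so only the
 * purgeable pages are reported.
 */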
static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	unsigned long num_pages;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);

	num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
	read_lock(&shrinker->lock);

	if (can_backup)
		num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
	else
		num_pages = 0;

	num_pages += shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

/*
 * Check if we need runtime pm, and if so try to grab a reference if
 * already active. If grabbing a reference fails, queue a worker that
 * does it for us outside of reclaim, but don't wait for it to complete.
 * If bo shrinking needs an rpm reference and we don't have it (yet),
 * that bo will be skipped anyway.
 */
static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
				       unsigned long nr_to_scan, bool can_backup)
{
	struct xe_device *xe = shrinker->xe;

	if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
	    !ttm_backup_bytes_avail())
		return false;

	if (!force) {
		read_lock(&shrinker->lock);
		force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
		read_unlock(&shrinker->lock);
		if (!force)
			return false;
	}

	if (!xe_pm_runtime_get_if_active(xe)) {
		if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
			xe_pm_runtime_get(xe);
			return true;
		}
		queue_work(xe->unordered_wq, &shrinker->pm_worker);
		return false;
	}

	return true;
}

static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
{
	if (runtime_pm)
		xe_pm_runtime_put(shrinker->xe);
}

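/*
 * Shrinker "scan" callback. First attempt to purge purgeable buffer
 * objects; if that doesn't scan enough pages and the gfp mask allows
 * backing up (__GFP_FS), make a second pass that backs up shrinkable
 * buffer objects. A runtime PM reference is taken up front if it looks
 * like the backup pass will need one.
 */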
static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
	struct xe_bo_shrink_flags shrink_flags = {
		.purge = true,
		/* Don't request writeback without __GFP_IO. */
		.writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
	};
	bool runtime_pm;
	bool purgeable;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);
	s64 lret;

	nr_to_scan = sc->nr_to_scan;

	read_lock(&shrinker->lock);
	purgeable = !!shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	/* Might need runtime PM. Try to wake early if it looks like it. */
	runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);

	if (purgeable && nr_scanned < nr_to_scan) {
		lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
					nr_to_scan, &nr_scanned);
		if (lret >= 0)
			freed += lret;
	}

	sc->nr_scanned = nr_scanned;
	if (nr_scanned >= nr_to_scan || !can_backup)
		goto out;

	/* If we didn't wake before, try to do it now if needed. */
	if (!runtime_pm)
		runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);

	shrink_flags.purge = false;
	lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
				nr_to_scan, &nr_scanned);
	if (lret >= 0)
		freed += lret;

	sc->nr_scanned = nr_scanned;
out:
	xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
	return nr_scanned ? freed : SHRINK_STOP;
}

/*
 * Wake up the device for shrinking by taking and then dropping a runtime
 * PM reference outside of the reclaim path.
 */
static void xe_shrinker_pm(struct work_struct *work)
{
	struct xe_shrinker *shrinker =
		container_of(work, typeof(*shrinker), pm_worker);

	xe_pm_runtime_get(shrinker->xe);
	xe_pm_runtime_put(shrinker->xe);
}

/**
 * xe_shrinker_create() - Create an xe per-device shrinker
 * @xe: Pointer to the xe device.
 *
 * Returns: A pointer to the created shrinker on success,
 * or an ERR_PTR() encoded error on failure.
 */
struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
{
	struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);

	if (!shrinker)
		return ERR_PTR(-ENOMEM);

	shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
	if (!shrinker->shrink) {
		kfree(shrinker);
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
	shrinker->xe = xe;
	rwlock_init(&shrinker->lock);
	shrinker->shrink->count_objects = xe_shrinker_count;
	shrinker->shrink->scan_objects = xe_shrinker_scan;
	shrinker->shrink->private_data = shrinker;
	shrinker_register(shrinker->shrink);

	return shrinker;
}
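
/*
 * Illustrative usage (sketch only): the shrinker is created once per
 * device and torn down when the device is destroyed, e.g.:
 *
 *	shrinker = xe_shrinker_create(xe);
 *	if (IS_ERR(shrinker))
 *		return PTR_ERR(shrinker);
 *	...
 *	xe_shrinker_destroy(shrinker);
 */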

/**
 * xe_shrinker_destroy() - Destroy an xe per-device shrinker
 * @shrinker: Pointer to the shrinker to destroy.
 */
void xe_shrinker_destroy(struct xe_shrinker *shrinker)
{
	xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
	xe_assert(shrinker->xe, !shrinker->purgeable_pages);
	shrinker_free(shrinker->shrink);
	flush_work(&shrinker->pm_worker);
	kfree(shrinker);
}