// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device, usually
 * when userspace still has open files and other handles to device resources.
 *
 * Release actions can be added with drmm_add_action(), memory allocations can
 * be done directly with drmm_kmalloc() and the related functions. Everything
 * will be released on the final drm_dev_put() in reverse order of how the
 * release actions have been added and memory has been allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are fully
 * concurrency-safe. It is nevertheless recommended to use managed resources
 * only for resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
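 *
 * A minimal usage sketch (struct example_state, example_release() and
 * example_init() are made-up names used only for illustration; the drmm_*()
 * calls are the actual API)::
 *
 *	struct example_state {
 *		bool hw_enabled;
 *	};
 *
 *	static void example_release(struct drm_device *dev, void *data)
 *	{
 *		struct example_state *state = data;
 *
 *		state->hw_enabled = false;
 *	}
 *
 *	static int example_init(struct drm_device *dev)
 *	{
 *		struct example_state *state;
 *
 *		state = drmm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return -ENOMEM;
 *
 *		state->hw_enabled = true;
 *
 *		return drmm_add_action_or_reset(dev, example_release, state);
 *	}
 *
 * Both the allocation and the release action are cleaned up automatically on
 * the final drm_dev_put(), in reverse order of registration, and
 * drmm_add_action_or_reset() calls example_release() immediately if
 * registering the action fails.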
 */

struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct drmres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

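/*
 * Free a resource node. The name may have been duplicated with
 * kstrdup_const(), so kfree_const() is used, which only frees it when it does
 * not point into .rodata.
 */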
static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

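/*
 * Release all managed resources of @dev: walk the list newest-first (i.e. in
 * reverse order of registration), call each release action and free the
 * bookkeeping nodes. This is the teardown counterpart of the drmm_*()
 * registration functions and runs as part of the final drm_dev_put().
 */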
void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

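/*
 * Unlink a resource node from the list. Callers must hold dev->managed.lock,
 * as both current users (drmm_release_action() and drmm_kfree()) do.
 */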
static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

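/*
 * Record the kmalloc()'ed allocation that embeds @dev so that it can be
 * kfree()'d last, after all managed release actions have run. The WARN_ONs
 * sanity check that @dev really lies within @container.
 */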
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

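/*
 * Like __drmm_add_action(), but if registering the release action fails, the
 * action is called right away, so callers do not need a separate error path
 * to undo their setup.
 */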
int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_release_action - release a managed action from a &drm_device
 * @dev: DRM device
 * @action: function which would be called when @dev is released
 * @data: opaque pointer, passed to @action
 *
 * This function calls the @action previously added by drmm_add_action()
 * immediately. The @action is removed from the list of cleanup actions for
 * @dev, which means that it won't be called in the final drm_dev_put().
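 *
 * As a rough sketch, reusing the hypothetical example_release() action and
 * state pointer from the sketch under "DOC: managed resources": to tear the
 * resource down early, e.g. on hotunplug, instead of waiting for the final
 * drm_dev_put(), a driver would call::
 *
 *	drmm_release_action(dev, example_release, state);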
 */
void drmm_release_action(struct drm_device *dev,
			 drmres_release_t action,
			 void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
		if (dr->node.release == action) {
			if (!data || *(void **)dr->data == data) {
				dr_match = dr;
				del_dr(dev, dr_match);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	action(dev, data);

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_release_action);

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
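 *
 * A short sketch (the size and the local variable are arbitrary, chosen only
 * to show the calling convention)::
 *
 *	u8 *buf = drmm_kmalloc(dev, 64, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 * The buffer then stays around until the final drm_dev_put(), unless it is
 * freed earlier with drmm_kfree(dev, buf).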
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", gfp);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);

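/*
 * Managed release callback for mutexes: destroys the mutex when the
 * &drm_device is released. Wired up by the drmm_mutex_init() helper in
 * drm_managed.h.
 */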
void __drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);

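/*
 * Managed release callback for workqueues: destroys the workqueue when the
 * &drm_device is released. Wired up by the managed workqueue helpers in
 * drm_managed.h.
 */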
void __drmm_workqueue_release(struct drm_device *device, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}
EXPORT_SYMBOL(__drmm_workqueue_release);
322