/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLEANUP_H
#define _LINUX_CLEANUP_H

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/args.h>

/**
 * DOC: scope-based cleanup helpers
 *
 * The "goto error" pattern is notorious for introducing subtle resource
 * leaks. It is tedious and error-prone to add new resource acquisition
 * constraints into code paths that already have several unwind
 * conditions. The "cleanup" helpers enable the compiler to help with
 * this tedium and can aid in maintaining LIFO (last in first out)
 * unwind ordering to avoid unintentional leaks.
 *
 * As drivers make up the majority of the kernel code base, here is an
 * example of using these helpers to clean up PCI drivers. The targets of
 * the cleanups are occasions where a goto is used to unwind a device
 * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
 * before returning.
 *
 * The DEFINE_FREE() macro can arrange for PCI device references to be
 * dropped when the associated variable goes out of scope::
 *
 *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
 *	...
 *	struct pci_dev *dev __free(pci_dev_put) =
 *		pci_get_slot(parent, PCI_DEVFN(0, 0));
 *
 * The above will automatically call pci_dev_put() if @dev is non-NULL
 * when @dev goes out of scope (automatic variable scope). If a function
 * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
 * freeing it) on success, it can do::
 *
 *	return no_free_ptr(dev);
 *
 * ...or::
 *
 *	return_ptr(dev);
 *
 * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
 * dropped when the scope where guard() is invoked ends::
 *
 *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
 *	...
 *	guard(pci_dev)(dev);
 *
 * The lifetime of the lock obtained by the guard() helper follows the
 * scope of automatic variable declaration. Take the following example::
 *
 *	func(...)
 *	{
 *		if (...) {
 *			...
 *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
 *			...
 *		} // <- implied pci_dev_unlock() triggered here
 *	}
 *
 * Observe that the lock is held for the remainder of the "if ()" block,
 * not the remainder of "func()".
 *
 * The ACQUIRE() macro can be used in all places that guard() can be
 * used, and additionally supports conditional locks::
 *
 *	DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
 *	...
 *	ACQUIRE(pci_dev_try, lock)(dev);
 *	rc = ACQUIRE_ERR(pci_dev_try, &lock);
 *	if (rc)
 *		return rc;
 *	// @lock is held
 *
 * Now, when a function uses both __free() and guard()/ACQUIRE(), or
 * multiple instances of __free(), the LIFO order of variable definition
 * matters. GCC documentation says:
 *
 * "When multiple variables in the same scope have cleanup attributes,
 * at exit from the scope their associated cleanup functions are run in
 * reverse order of definition (last defined, first cleanup)."
 *
 * When the unwind order matters it requires that variables be defined
 * mid-function scope rather than at the top of the function. Take the
 * following example and notice the bug highlighted by "!!"::
 *
 *	LIST_HEAD(list);
 *	DEFINE_MUTEX(lock);
 *
 *	struct object {
 *	        struct list_head node;
 *	};
 *
 *	static struct object *alloc_add(void)
 *	{
 *	        struct object *obj;
 *
 *	        lockdep_assert_held(&lock);
 *	        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	        if (obj) {
 *	                INIT_LIST_HEAD(&obj->node);
 *	                list_add(&obj->node, &list);
 *	        }
 *	        return obj;
 *	}
 *
 *	static void remove_free(struct object *obj)
 *	{
 *	        lockdep_assert_held(&lock);
 *	        list_del(&obj->node);
 *	        kfree(obj);
 *	}
 *
 *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
 *	static int init(void)
 *	{
 *	        struct object *obj __free(remove_free) = NULL;
 *	        int err;
 *
 *	        guard(mutex)(&lock);
 *	        obj = alloc_add();
 *
 *	        if (!obj)
 *	                return -ENOMEM;
 *
 *	        err = other_init(obj);
 *	        if (err)
 *	                return err; // remove_free() called without the lock!!
 *
 *	        no_free_ptr(obj);
 *	        return 0;
 *	}
 *
 * That bug is fixed by changing init() to call guard() and define +
 * initialize @obj in this order::
 *
 *	guard(mutex)(&lock);
 *	struct object *obj __free(remove_free) = alloc_add();
 *
 * Given that the "__free(...) = NULL" pattern for variables defined at
 * the top of the function poses this potential interdependency problem,
 * the recommendation is to always define and assign variables in one
 * statement, and not group variable definitions at the top of the
 * function when __free() is used.
 *
 * Lastly, given that the benefit of cleanup helpers is removal of
 * "goto", and that the "goto" statement can jump between scopes, the
 * expectation is that usage of "goto" and cleanup helpers is never
 * mixed in the same function. I.e. for a given routine, convert all
 * resources that need a "goto" cleanup to scope-based cleanup, or
 * convert none of them.
 */

/*
 * DEFINE_FREE(name, type, free):
 *	simple helper macro that defines the required wrapper for a __free()
 *	based cleanup function. @free is an expression using '_T' to access the
 *	variable. @free should typically include a NULL test before calling a
 *	function, see the example below.
 *
 * __free(name):
 *	variable attribute to add a scope-based cleanup to the variable.
 *
 * no_free_ptr(var):
 *	like a non-atomic xchg(var, NULL), such that the cleanup function will
 *	be inhibited -- provided it sanely deals with a NULL value.
 *
 *	NOTE: this has __must_check semantics so that it is harder to accidentally
 *	leak the resource.
 *
 * return_ptr(p):
 *	returns p while inhibiting the __free().
 *
 * Ex.
 *
 * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
 *
 * void *alloc_obj(...)
 * {
 *	struct obj *p __free(kfree) = kmalloc(...);
 *	if (!p)
 *		return NULL;
 *
 *	if (!init_obj(p))
 *		return NULL;
 *
 *	return_ptr(p);
 * }
 *
 * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
 * kfree() is fine to be called with a NULL value. This is on purpose. This way
 * the compiler sees the end of our alloc_obj() function as:
 *
 *	tmp = p;
 *	p = NULL;
 *	if (p)
 *		kfree(p);
 *	return tmp;
 *
 * And through the magic of value-propagation and dead-code-elimination, it
 * eliminates the actual cleanup call and compiles into:
 *
 *	return p;
 *
 * Without the NULL test it turns into a mess and the compiler can't help us.
 */

#define DEFINE_FREE(_name, _type, _free) \
	static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }

#define __free(_name)	__cleanup(__free_##_name)

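/*
 * Read @p, replace it with @nullvalue, and return the old value.
 * Non-atomic; callers must ensure @p is not concurrently modified.
 */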
#define __get_and_null(p, nullvalue)   \
	({                                  \
		__auto_type __ptr = &(p);   \
		__auto_type __val = *__ptr; \
		*__ptr = nullvalue;         \
		__val;                      \
	})

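/*
 * Identity function, used to give no_free_ptr() its __must_check
 * semantics: the caller is forced to consume the returned pointer.
 */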
static inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }

#define no_free_ptr(p) \
	((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))

#define return_ptr(p)	return no_free_ptr(p)

/*
 * Only for situations where an allocation is handed in to another function
 * and consumed by that function on success.
 *
 *	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	setup(f);
 *	if (some_condition)
 *		return -EINVAL;
 *	....
 *	ret = bar(f);
 *	if (!ret)
 *		retain_and_null_ptr(f);
 *	return ret;
 *
 * After retain_and_null_ptr(f) the variable f is NULL and cannot be
 * dereferenced anymore.
 */
#define retain_and_null_ptr(p)		((void)__get_and_null(p, NULL))

/*
 * DEFINE_CLASS(name, type, exit, init, init_args...):
 *	helper to define the destructor and constructor for a type.
 *	@exit is an expression using '_T' -- similar to FREE above.
 *	@init is an expression in @init_args resulting in @type
 *
 * EXTEND_CLASS(name, ext, init, init_args...):
 *	extends class @name to @name@ext with the new constructor
 *
 * CLASS(name, var)(args...):
 *	declare the variable @var as an instance of the named class
 *
 * Ex.
 *
 * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
 *
 *	CLASS(fdget, f)(fd);
 *	if (fd_empty(f))
 *		return -EBADF;
 *
 *	// use 'f' without concern
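 *
 * An EXTEND_CLASS() sketch, assuming fdget_raw() returns the same
 * struct fd as fdget(); the extended class reuses fdget's destructor:
 *
 * EXTEND_CLASS(fdget, _raw, fdget_raw(fd), int fd)
 *
 *	CLASS(fdget_raw, f)(fd);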
 */

#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
typedef _type class_##_name##_t;					\
static inline void class_##_name##_destructor(_type *p)			\
{ _type _T = *p; _exit; }						\
static inline _type class_##_name##_constructor(_init_args)		\
{ _type t = _init; return t; }

#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
typedef class_##_name##_t class_##_name##ext##_t;			\
static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
{ class_##_name##_destructor(p); }					\
static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
{ class_##_name##_t t = _init; return t; }

#define CLASS(_name, var)						\
	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
		class_##_name##_constructor

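/*
 * scoped_class(name, var, args) { }:
 *	similar to CLASS(name, var)(args), except @var is declared in a
 *	for-loop such that its scope is bound to the next (compound)
 *	statement.
 */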
#define scoped_class(_name, var, args)                          \
	for (CLASS(_name, var)(args);                           \
	     __guard_ptr(_name)(&var) || !__is_cond_ptr(_name); \
	     ({ goto _label; }))                                \
		if (0) {                                        \
_label:                                                         \
			break;                                  \
		} else

/*
 * DEFINE_GUARD(name, type, lock, unlock):
 *	trivial wrapper around DEFINE_CLASS() above specifically
 *	for locks.
 *
 * DEFINE_GUARD_COND(name, ext, condlock)
 *	wrapper around EXTEND_CLASS above to add conditional lock
 *	variants to a base class, eg. mutex_trylock() or
 *	mutex_lock_interruptible().
 *
 * guard(name):
 *	an anonymous instance of the (guard) class, not recommended for
 *	conditional locks.
 *
 * scoped_guard (name, args...) { }:
 *	similar to CLASS(name, scope)(args), except the variable (with the
 *	explicit name 'scope') is declared in a for-loop such that its scope is
 *	bound to the next (compound) statement.
 *
 *	For conditional locks the loop body is skipped when the lock is not
 *	acquired.
 *
 * scoped_cond_guard (name, fail, args...) { }:
 *	similar to scoped_guard(), except it executes the @fail statement
 *	when the lock acquisition fails.
 *
 *	Only for conditional locks.
 *
 * ACQUIRE(name, var):
 *	a named instance of the (guard) class, suitable for conditional
 *	locks when paired with ACQUIRE_ERR().
 *
 * ACQUIRE_ERR(name, &var):
 *	a helper that is effectively a PTR_ERR() conversion of the guard
 *	pointer. Returns 0 when the lock was acquired and a negative
 *	error code otherwise.
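 *
 * Ex. (a sketch, assuming the mutex guard classes defined in
 * <linux/mutex.h>, including the conditional mutex_intr variant):
 *
 *	scoped_guard(mutex, &my_mutex) {
 *		// my_mutex held for the duration of this block
 *	}
 *
 *	scoped_cond_guard(mutex_intr, return -EINTR, &my_mutex) {
 *		// only entered with my_mutex held; a pending signal
 *		// while waiting returns -EINTR instead
 *	}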
 */

#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)	\
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond

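/*
 * Evaluates to true when @_ptr is NULL or an ERR_PTR() value, i.e. when
 * a conditional guard constructor failed to acquire the lock.
 */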
#define __GUARD_IS_ERR(_ptr)                                       \
	({                                                         \
		unsigned long _rc = (__force unsigned long)(_ptr); \
		unlikely((_rc - 1) >= -MAX_ERRNO - 1);             \
	})

#define __DEFINE_GUARD_LOCK_PTR(_name, _exp)                                \
	static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{                                                                   \
		void *_ptr = (void *)(__force unsigned long)*(_exp);        \
		if (IS_ERR(_ptr)) {                                         \
			_ptr = NULL;                                        \
		}                                                           \
		return _ptr;                                                \
	}                                                                   \
	static inline int class_##_name##_lock_err(class_##_name##_t *_T)   \
	{                                                                   \
		long _rc = (__force unsigned long)*(_exp);                  \
		if (!_rc) {                                                 \
			_rc = -EBUSY;                                       \
		}                                                           \
		if (!IS_ERR_VALUE(_rc)) {                                   \
			_rc = 0;                                            \
		}                                                           \
		return _rc;                                                 \
	}

#define DEFINE_CLASS_IS_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_CLASS_IS_COND_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
	DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
	DEFINE_CLASS_IS_GUARD(_name)

#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
	EXTEND_CLASS(_name, _ext, \
		     ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
		     class_##_name##_t _T) \
	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); } \
	static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

/*
 * Default binary condition; success on 'true'.
 */
#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
	DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)

#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
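
/*
 * Ex. (sketches mirroring <linux/mutex.h>):
 *
 *	// 3-arg form: @_lock's return value itself is the success condition
 *	DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
 *
 *	// 4-arg form: @_lock returns 0 on success, -errno otherwise
 *	DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
 */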

#define guard(_name) \
	CLASS(_name, __UNIQUE_ID(guard))

#define __guard_ptr(_name) class_##_name##_lock_ptr
#define __guard_err(_name) class_##_name##_lock_err
#define __is_cond_ptr(_name) class_##_name##_is_conditional

#define ACQUIRE(_name, _var)     CLASS(_name, _var)
#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)

/*
 * Helper macro for scoped_guard().
 *
 * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
 * the compiler can be sure that, for unconditional locks, the body of the
 * loop (caller-provided code glued to the else clause) cannot be skipped.
 * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is
 * too hard to deduce (even though it could be proven true for unconditional
 * locks).
 */
#define __scoped_guard(_name, _label, args...)				\
	for (CLASS(_name, scope)(args);					\
	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
	     ({ goto _label; }))					\
		if (0) {						\
_label:									\
			break;						\
		} else

#define scoped_guard(_name, args...)	\
	__scoped_guard(_name, __UNIQUE_ID(label), args)

#define __scoped_cond_guard(_name, _fail, _label, args...)		\
	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
		if (!__guard_ptr(_name)(&scope)) {			\
			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
			_fail;						\
_label:									\
			break;						\
		} else

#define scoped_cond_guard(_name, _fail, args...)	\
	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)

/*
 * Additional helper macros for generating lock guards with types, either for
 * locks that don't have a native type (eg. RCU, preempt) or those that need a
 * 'fat' pointer (eg. spin_lock_irqsave).
 *
 * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
 *
 * will result in the following type:
 *
 *   typedef struct {
 *	type *lock;		// 'type := void' for the _0 variant
 *	__VA_ARGS__;
 *   } class_##name##_t;
 *
 * As above, both _lock and _unlock are statements, except this time '_T' will
 * be a pointer to the above struct.
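 *
 * Ex. (mirroring the guards defined in <linux/spinlock.h> and
 * <linux/preempt.h>):
 *
 *	DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 *			    spin_lock(_T->lock),
 *			    spin_unlock(_T->lock))
 *
 *	DEFINE_LOCK_GUARD_0(preempt,
 *			    preempt_disable(),
 *			    preempt_enable())
 *	...
 *	guard(spinlock)(&my_lock);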
 */

#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
typedef struct {							\
	_type *lock;							\
	__VA_ARGS__;							\
} class_##_name##_t;							\
									\
static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
{									\
	if (!__GUARD_IS_ERR(_T->lock)) { _unlock; }			\
}									\
									\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)

#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
static inline class_##_name##_t class_##_name##_constructor(_type *l)	\
{									\
	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
	_lock;								\
	return _t;							\
}

#define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
static inline class_##_name##_t class_##_name##_constructor(void)	\
{									\
	class_##_name##_t _t = { .lock = (void*)1 },			\
			 *_T __maybe_unused = &_t;			\
	_lock;								\
	return _t;							\
}

#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)

#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_0(_name, _lock)

#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond)		\
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
	EXTEND_CLASS(_name, _ext,					\
		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
		        int _RET = (_lock);                             \
		        if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
			_t; }),						\
		     typeof_member(class_##_name##_t, lock) l)		\
	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); } \
	static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
	DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)

#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
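
/*
 * Ex. (mirroring <linux/spinlock.h>):
 *
 *	DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
 *	...
 *	scoped_guard(spinlock_try, &my_lock) {
 *		// only entered when the trylock succeeded
 *	}
 */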

#endif /* _LINUX_CLEANUP_H */