1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_CLEANUP_H
3 #define _LINUX_CLEANUP_H
4
5 #include <linux/compiler.h>
6 #include <linux/err.h>
7 #include <linux/args.h>
8
9 /**
10 * DOC: scope-based cleanup helpers
11 *
12 * The "goto error" pattern is notorious for introducing subtle resource
13 * leaks. It is tedious and error prone to add new resource acquisition
14 * constraints into code paths that already have several unwind
15 * conditions. The "cleanup" helpers enable the compiler to help with
16 * this tedium and can aid in maintaining LIFO (last in first out)
17 * unwind ordering to avoid unintentional leaks.
18 *
19 * As drivers make up the majority of the kernel code base, here is an
20 * example of using these helpers to clean up PCI drivers. The target of
21 * the cleanups are occasions where a goto is used to unwind a device
22 * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
23 * before returning.
24 *
25 * The DEFINE_FREE() macro can arrange for PCI device references to be
26 * dropped when the associated variable goes out of scope::
27 *
28 * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
29 * ...
30 * struct pci_dev *dev __free(pci_dev_put) =
31 * pci_get_slot(parent, PCI_DEVFN(0, 0));
32 *
33 * The above will automatically call pci_dev_put() if @dev is non-NULL
34 * when @dev goes out of scope (automatic variable scope). If a function
35 * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
36 * freeing it) on success, it can do::
37 *
38 * return no_free_ptr(dev);
39 *
40 * ...or::
41 *
42 * return_ptr(dev);
43 *
44 * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
45 * dropped when the scope where guard() is invoked ends::
46 *
47 * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
48 * ...
49 * guard(pci_dev)(dev);
50 *
51 * The lifetime of the lock obtained by the guard() helper follows the
52 * scope of automatic variable declaration. Take the following example::
53 *
54 * func(...)
55 * {
56 * if (...) {
57 * ...
58 * guard(pci_dev)(dev); // pci_dev_lock() invoked here
59 * ...
60 * } // <- implied pci_dev_unlock() triggered here
61 * }
62 *
63 * Observe the lock is held for the remainder of the "if ()" block not
64 * the remainder of "func()".
65 *
66 * The ACQUIRE() macro can be used in all places that guard() can be
67 * used and additionally support conditional locks::
68 *
69 * DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
70 * ...
71 * ACQUIRE(pci_dev_try, lock)(dev);
72 * rc = ACQUIRE_ERR(pci_dev_try, &lock);
73 * if (rc)
74 * return rc;
75 * // @lock is held
76 *
77 * Now, when a function uses both __free() and guard()/ACQUIRE(), or
78 * multiple instances of __free(), the LIFO order of variable definition
79 * order matters. GCC documentation says:
80 *
81 * "When multiple variables in the same scope have cleanup attributes,
82 * at exit from the scope their associated cleanup functions are run in
83 * reverse order of definition (last defined, first cleanup)."
84 *
85 * When the unwind order matters it requires that variables be defined
 * mid-function scope rather than at the top of the function. Take the
87 * following example and notice the bug highlighted by "!!"::
88 *
89 * LIST_HEAD(list);
90 * DEFINE_MUTEX(lock);
91 *
92 * struct object {
93 * struct list_head node;
94 * };
95 *
96 * static struct object *alloc_add(void)
97 * {
98 * struct object *obj;
99 *
100 * lockdep_assert_held(&lock);
101 * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
102 * if (obj) {
 *		INIT_LIST_HEAD(&obj->node);
 *		list_add(&obj->node, &list);
105 * }
106 * return obj;
107 * }
108 *
109 * static void remove_free(struct object *obj)
110 * {
111 * lockdep_assert_held(&lock);
112 * list_del(&obj->node);
113 * kfree(obj);
114 * }
115 *
116 * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
117 * static int init(void)
118 * {
119 * struct object *obj __free(remove_free) = NULL;
120 * int err;
121 *
122 * guard(mutex)(&lock);
123 * obj = alloc_add();
124 *
125 * if (!obj)
126 * return -ENOMEM;
127 *
128 * err = other_init(obj);
129 * if (err)
130 * return err; // remove_free() called without the lock!!
131 *
132 * no_free_ptr(obj);
133 * return 0;
134 * }
135 *
136 * That bug is fixed by changing init() to call guard() and define +
137 * initialize @obj in this order::
138 *
139 * guard(mutex)(&lock);
140 * struct object *obj __free(remove_free) = alloc_add();
141 *
142 * Given that the "__free(...) = NULL" pattern for variables defined at
143 * the top of the function poses this potential interdependency problem
144 * the recommendation is to always define and assign variables in one
145 * statement and not group variable definitions at the top of the
146 * function when __free() is used.
147 *
148 * Lastly, given that the benefit of cleanup helpers is removal of
149 * "goto", and that the "goto" statement can jump between scopes, the
150 * expectation is that usage of "goto" and cleanup helpers is never
151 * mixed in the same function. I.e. for a given routine, convert all
152 * resources that need a "goto" cleanup to scope-based cleanup, or
153 * convert none of them.
154 */
155
156 /*
157 * DEFINE_FREE(name, type, free):
158 * simple helper macro that defines the required wrapper for a __free()
159 * based cleanup function. @free is an expression using '_T' to access the
160 * variable. @free should typically include a NULL test before calling a
161 * function, see the example below.
162 *
163 * __free(name):
164 * variable attribute to add a scoped based cleanup to the variable.
165 *
166 * no_free_ptr(var):
167 * like a non-atomic xchg(var, NULL), such that the cleanup function will
168 * be inhibited -- provided it sanely deals with a NULL value.
169 *
170 * NOTE: this has __must_check semantics so that it is harder to accidentally
171 * leak the resource.
172 *
173 * return_ptr(p):
174 * returns p while inhibiting the __free().
175 *
176 * Ex.
177 *
178 * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
179 *
180 * void *alloc_obj(...)
181 * {
182 * struct obj *p __free(kfree) = kmalloc(...);
183 * if (!p)
184 * return NULL;
185 *
186 * if (!init_obj(p))
187 * return NULL;
188 *
189 * return_ptr(p);
190 * }
191 *
192 * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
193 * kfree() is fine to be called with a NULL value. This is on purpose. This way
194 * the compiler sees the end of our alloc_obj() function as:
195 *
196 * tmp = p;
197 * p = NULL;
198 * if (p)
199 * kfree(p);
200 * return tmp;
201 *
202 * And through the magic of value-propagation and dead-code-elimination, it
203 * eliminates the actual cleanup call and compiles into:
204 *
205 * return p;
206 *
207 * Without the NULL test it turns into a mess and the compiler can't help us.
208 */
209
/*
 * Generate the cleanup function that __free(@_name) attaches: it loads the
 * annotated variable's current value into '_T' and runs the caller-supplied
 * @_free expression on it when the variable goes out of scope.
 */
#define DEFINE_FREE(_name, _type, _free) \
	static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }

/* Variable attribute: run __free_##_name() when the variable leaves scope. */
#define __free(_name)	__cleanup(__free_##_name)
214
/*
 * Non-atomic exchange: evaluate to the current value of @p after storing
 * @nullvalue back into it.  Shared helper for no_free_ptr() and
 * retain_and_null_ptr().
 */
#define __get_and_null(p, nullvalue)		\
	({					\
		auto __ptr = &(p);		\
		auto __val = *__ptr;		\
		*__ptr = nullvalue;		\
		__val;				\
	})
222
223 static __always_inline __must_check
__must_check_fn(const volatile void * val)224 const volatile void * __must_check_fn(const volatile void *val)
225 { return val; }
226
/*
 * Take ownership away from the cleanup machinery: NULL out @p (so a sane
 * @_free expression becomes a no-op) and yield the old pointer, wrapped in
 * __must_check semantics so the result cannot be accidentally dropped.
 */
#define no_free_ptr(p) \
	((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))

/* Return @p from the current function while inhibiting its __free(). */
#define return_ptr(p)	return no_free_ptr(p)
231
232 /*
233 * Only for situations where an allocation is handed in to another function
234 * and consumed by that function on success.
235 *
236 * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
237 *
238 * setup(f);
239 * if (some_condition)
240 * return -EINVAL;
241 * ....
242 * ret = bar(f);
243 * if (!ret)
244 * retain_and_null_ptr(f);
245 * return ret;
246 *
247 * After retain_and_null_ptr(f) the variable f is NULL and cannot be
248 * dereferenced anymore.
249 */
/* NULL out @p (disarming its __free()) and deliberately discard the value. */
#define retain_and_null_ptr(p)	((void)__get_and_null(p, NULL))
251
252 /*
253 * DEFINE_CLASS(name, type, exit, init, init_args...):
254 * helper to define the destructor and constructor for a type.
255 * @exit is an expression using '_T' -- similar to FREE above.
256 * @init is an expression in @init_args resulting in @type
257 *
258 * EXTEND_CLASS(name, ext, init, init_args...):
259 * extends class @name to @name@ext with the new constructor
260 *
261 * CLASS(name, var)(args...):
262 * declare the variable @var as an instance of the named class
263 *
264 * CLASS_INIT(name, var, init_expr):
265 * declare the variable @var as an instance of the named class with
266 * custom initialization expression.
267 *
268 * Ex.
269 *
270 * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
271 *
272 * CLASS(fdget, f)(fd);
273 * if (fd_empty(f))
274 * return -EBADF;
275 *
276 * // use 'f' without concern
277 */
278
/*
 * Emit the typedefs plus constructor/destructor pair that CLASS(), guard()
 * and friends reference by name.  @_exit runs with '_T' bound to the object
 * value; @_init must be an expression in @_init_args yielding a @_type.
 */
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
typedef _type class_##_name##_t;					\
typedef _type lock_##_name##_t;						\
static __always_inline void class_##_name##_destructor(_type *p)	\
	__no_context_analysis						\
{ _type _T = *p; _exit; }						\
static __always_inline _type class_##_name##_constructor(_init_args)	\
	__no_context_analysis						\
{ _type t = _init; return t; }
288
/*
 * Derive class @_name@ext from @_name with a new constructor; the
 * destructor delegates to the base class unless @_cond evaluates true
 * (used to skip the unlock when a conditional acquire failed).
 */
#define EXTEND_CLASS_COND(_name, ext, _cond, _init, _init_args...)	\
typedef lock_##_name##_t lock_##_name##ext##_t;				\
typedef class_##_name##_t class_##_name##ext##_t;			\
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *_T) \
{ if (_cond) return; class_##_name##_destructor(_T); }			\
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
	__no_context_analysis						\
{ class_##_name##_t t = _init; return t; }

/* EXTEND_CLASS_COND() with an always-false condition: plain destructor reuse. */
#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
	EXTEND_CLASS_COND(_name, ext, 0, _init, _init_args)
300
/*
 * Declare @var as an instance of class @_name with the class destructor
 * attached; the trailing (args...) at the use site invoke the constructor.
 */
#define CLASS(_name, var)						\
	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
		class_##_name##_constructor

/* As CLASS(), but initialized from an arbitrary expression instead. */
#define CLASS_INIT(_name, _var, _init_expr)				\
	class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr)
307
/*
 * Bind a CLASS() instance's lifetime to the next (compound) statement:
 * the for-loop declares the variable, the dead "if (0)" branch hosts the
 * label, and the loop's third expression jumps to it after one pass, so
 * the user body (glued to the else clause) executes exactly once.
 */
#define __scoped_class(_name, var, _label, args...)			\
	for (CLASS(_name, var)(args); ; ({ goto _label; }))		\
		if (0) {						\
_label:									\
			break;						\
		} else

#define scoped_class(_name, var, args...)				\
	__scoped_class(_name, var, __UNIQUE_ID(label), args)
317
318 /*
319 * DEFINE_GUARD(name, type, lock, unlock):
320 * trivial wrapper around DEFINE_CLASS() above specifically
321 * for locks.
322 *
323 * DEFINE_GUARD_COND(name, ext, condlock)
324 * wrapper around EXTEND_CLASS above to add conditional lock
325 * variants to a base class, eg. mutex_trylock() or
326 * mutex_lock_interruptible().
327 *
328 * guard(name):
329 * an anonymous instance of the (guard) class, not recommended for
330 * conditional locks.
331 *
332 * scoped_guard (name, args...) { }:
333 * similar to CLASS(name, scope)(args), except the variable (with the
 * explicit name 'scope') is declared in a for-loop such that its scope is
335 * bound to the next (compound) statement.
336 *
337 * for conditional locks the loop body is skipped when the lock is not
338 * acquired.
339 *
340 * scoped_cond_guard (name, fail, args...) { }:
 *	similar to scoped_guard(), except the @fail statement is executed
 *	when the lock acquisition fails.
343 *
344 * Only for conditional locks.
345 *
346 * ACQUIRE(name, var):
347 * a named instance of the (guard) class, suitable for conditional
348 * locks when paired with ACQUIRE_ERR().
349 *
350 * ACQUIRE_ERR(name, &var):
351 * a helper that is effectively a PTR_ERR() conversion of the guard
352 * pointer. Returns 0 when the lock was acquired and a negative
353 * error code otherwise.
354 */
355
/* Record at compile time whether class @_name is a conditional lock. */
#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)			\
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond

/*
 * Unconditional class: the lock_ptr() helper reports "always held" via a
 * non-NULL dummy pointer.
 */
#define DEFINE_CLASS_IS_UNCONDITIONAL(_name)				\
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false);			\
	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{ return (void *)1; }
363
/*
 * True when @_ptr is NULL or an ERR_PTR() value: subtracting 1 lets a
 * single unsigned comparison cover both 0 and the [-MAX_ERRNO, -1] range.
 */
#define __GUARD_IS_ERR(_ptr)						\
	({								\
		unsigned long _rc = (__force unsigned long)(_ptr);	\
		unlikely((_rc - 1) >= -MAX_ERRNO - 1);			\
	})
369
/*
 * Emit the two introspection helpers for a guard class.  @_exp evaluates
 * (given '_T') to the lock-state word:
 *
 * class_##_name##_lock_ptr(): non-NULL when the lock is held; ERR_PTR()
 * values are mapped to NULL so callers only need a boolean test.
 *
 * class_##_name##_lock_err(): 0 when held, otherwise a negative errno;
 * a zero/false acquire result with no encoded error becomes -EBUSY.
 */
#define __DEFINE_GUARD_LOCK_PTR(_name, _exp)				\
	static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{								\
		void *_ptr = (void *)(__force unsigned long)*(_exp);	\
		if (IS_ERR(_ptr)) {					\
			_ptr = NULL;					\
		}							\
		return _ptr;						\
	}								\
	static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
	{								\
		long _rc = (__force unsigned long)*(_exp);		\
		if (!_rc) {						\
			_rc = -EBUSY;					\
		}							\
		if (!IS_ERR_VALUE(_rc)) {				\
			_rc = 0;					\
		}							\
		return _rc;						\
	}
390
/* Unconditional guard class: the lock state is the object value itself. */
#define DEFINE_CLASS_IS_GUARD(_name)					\
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false);			\
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

/* As above, but for classes whose acquisition may fail. */
#define DEFINE_CLASS_IS_COND_GUARD(_name)				\
	__DEFINE_CLASS_IS_CONDITIONAL(_name, true);			\
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

/*
 * Lock guard over DEFINE_CLASS(): the constructor runs @_lock and stores
 * the lock pointer; the destructor runs @_unlock when it is non-NULL.
 */
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
	DEFINE_CLASS_IS_GUARD(_name)
402
/*
 * Conditional variant of a DEFINE_GUARD() class.  The extended
 * constructor runs @_lock and, when @_cond (an expression in the
 * acquire's return value '_RET') signals failure, stores ERR_PTR(_RET)
 * instead of the lock pointer so lock_ptr()/lock_err() report it; the
 * extended destructor then skips the unlock via __GUARD_IS_ERR().
 */
#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond)			\
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
	EXTEND_CLASS_COND(_name, _ext, __GUARD_IS_ERR(*_T),		\
			  ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
			  class_##_name##_t _T)				\
	static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); }			\
	static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

/*
 * Default binary condition; success on 'true'.
 */
#define DEFINE_GUARD_COND_3(_name, _ext, _lock)				\
	DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)

/* Dispatch on argument count: the 3-arg form defaults the condition to _RET. */
#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
420
/* Anonymous guard instance; lock held until the end of the enclosing scope. */
#define guard(_name) \
	CLASS(_name, __UNIQUE_ID(guard))

/* Name-resolution helpers for the per-class introspection symbols. */
#define __guard_ptr(_name) class_##_name##_lock_ptr
#define __guard_err(_name) class_##_name##_lock_err
#define __is_cond_ptr(_name) class_##_name##_is_conditional

/* Named guard instance, to be checked afterwards with ACQUIRE_ERR(). */
#define ACQUIRE(_name, _var)		CLASS(_name, _var)
/* 0 when @_var holds the lock, a negative errno otherwise. */
#define ACQUIRE_ERR(_name, _var)	__guard_err(_name)(_var)
430
431 /*
432 * Helper macro for scoped_guard().
433 *
434 * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
435 * compiler would be sure that for the unconditional locks the body of the
436 * loop (caller-provided code glued to the else clause) could not be skipped.
437 * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
438 * hard to deduce (even if could be proven true for unconditional locks).
439 */
/* See the comment above: the for/if/goto shape runs the body at most once. */
#define __scoped_guard(_name, _label, args...)				\
	for (CLASS(_name, scope)(args);					\
	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
	     ({ goto _label; }))					\
		if (0) {						\
_label:									\
			break;						\
		} else

/* Hold the guard 'scope' for exactly the next (compound) statement. */
#define scoped_guard(_name, args...)					\
	__scoped_guard(_name, __UNIQUE_ID(label), args)
451
/*
 * Like __scoped_guard(), but on acquire failure the @_fail statement runs
 * (then falls through to the break) instead of silently skipping the body.
 * Restricted to conditional classes via BUILD_BUG_ON().
 */
#define __scoped_cond_guard(_name, _fail, _label, args...)		\
	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
		if (!__guard_ptr(_name)(&scope)) {			\
			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
			_fail;						\
_label:									\
			break;						\
		} else

#define scoped_cond_guard(_name, _fail, args...)			\
	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
463
464 /*
465 * Additional helper macros for generating lock guards with types, either for
466 * locks that don't have a native type (eg. RCU, preempt) or those that need a
467 * 'fat' pointer (eg. spin_lock_irqsave).
468 *
469 * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
470 * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
471 * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
472 *
473 * will result in the following type:
474 *
475 * typedef struct {
476 * type *lock; // 'type := void' for the _0 variant
477 * __VA_ARGS__;
478 * } class_##name##_t;
479 *
480 * As above, both _lock and _unlock are statements, except this time '_T' will
481 * be a pointer to the above struct.
482 */
483
/*
 * Emit the 'fat' guard type (lock pointer plus optional extra members),
 * its destructor (runs @_unlock with '_T' pointing at the struct, only
 * when the lock pointer is set), and the lock_ptr/lock_err helpers.
 */
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
typedef _type lock_##_name##_t;						\
typedef struct {							\
	_type *lock;							\
	__VA_ARGS__;							\
} class_##_name##_t;							\
									\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
	__no_context_analysis						\
{									\
	if (_T->lock) { _unlock; }					\
}									\
									\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
498
/*
 * Constructor for the _1 variant: record the lock pointer, then run the
 * caller's lock statement(s) (__VA_ARGS__) with '_T' pointing at the new
 * instance.
 */
#define __DEFINE_LOCK_GUARD_1(_name, _type, ...)			\
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
	__no_context_analysis						\
{									\
	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
	__VA_ARGS__;							\
	return _t;							\
}

/*
 * Constructor for the typeless _0 variant: there is no real lock object,
 * so a (void *)1 placeholder marks the guard as "held".
 */
#define __DEFINE_LOCK_GUARD_0(_name, ...)				\
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
	__no_context_analysis						\
{									\
	class_##_name##_t _t = { .lock = (void*)1 },			\
		*_T __maybe_unused = &_t;				\
	__VA_ARGS__;							\
	return _t;							\
}
517
/*
 * Re-declare the _0 constructor/destructor prototypes so that the @_lock /
 * @_unlock attribute lists can be attached for context analysis.
 */
#define DECLARE_LOCK_GUARD_0_ATTRS(_name, _lock, _unlock)		\
static inline class_##_name##_t class_##_name##_constructor(void) _lock;\
static inline void class_##_name##_destructor(class_##_name##_t *_T) _unlock;
521
522 /*
523 * To support Context Analysis, we need to allow the compiler to see the
524 * acquisition and release of the context lock. However, the "cleanup" helpers
525 * wrap the lock in a struct passed through separate helper functions, which
526 * hides the lock alias from the compiler (no inter-procedural analysis).
527 *
528 * To make it work, we introduce an explicit alias to the context lock instance
529 * that is "cleaned" up with a separate cleanup helper. This helper is a dummy
530 * function that does nothing at runtime, but has the "_unlock" attribute to
531 * tell the compiler what happens at the end of the scope.
532 *
533 * To generalize the pattern, the WITH_LOCK_GUARD_1_ATTRS() macro should be used
534 * to redefine the constructor, which then also creates the alias variable with
535 * the right "cleanup" attribute, *after* DECLARE_LOCK_GUARD_1_ATTRS() has been
536 * used.
537 *
538 * Example usage:
539 *
540 * DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
541 * #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
542 *
543 * Note: To support the for-loop based scoped helpers, the auxiliary variable
544 * must be a pointer to the "class" type because it is defined in the same
545 * statement as the guard variable. However, we initialize it with the lock
546 * pointer (despite the type mismatch, the compiler's alias analysis still works
547 * as expected). The "_unlock" attribute receives a pointer to the auxiliary
548 * variable (a double pointer to the class type), and must be cast and
549 * dereferenced appropriately.
550 */
/*
 * Context-analysis hooks for _1 guards (see the comment above): re-declare
 * the constructor with @_lock attached, and emit the dummy cleanup function
 * whose only purpose is to carry @_unlock -- its body is empty at runtime.
 */
#define DECLARE_LOCK_GUARD_1_ATTRS(_name, _lock, _unlock)		\
static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T) _lock;\
static __always_inline void __class_##_name##_cleanup_ctx(class_##_name##_t **_T) \
	__no_context_analysis _unlock { }
/*
 * Wrap the constructor call and additionally declare the auxiliary alias
 * variable (initialized with the lock pointer despite the type mismatch,
 * see above) that triggers the dummy cleanup at end of scope.
 */
#define WITH_LOCK_GUARD_1_ATTRS(_name, _T)				\
	class_##_name##_constructor(_T),				\
	*__UNIQUE_ID(unlock) __cleanup(__class_##_name##_cleanup_ctx) = (void *)(unsigned long)(_T)
558
/* Glue: conditional flag + fat type/destructor + constructor (_1 variant). */
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)

/* As above for the typeless _0 variant (RCU, preempt, ...). */
#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_0(_name, _lock)

/*
 * Conditional variant of a DEFINE_LOCK_GUARD_1() class: when @_cond (an
 * expression in the acquire's return value '_RET') signals failure, the
 * stored lock pointer becomes ERR_PTR(_RET), which the extended destructor
 * and the lock_ptr()/lock_err() helpers recognize.
 */
#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond)		\
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
	EXTEND_CLASS_COND(_name, _ext, __GUARD_IS_ERR(_T->lock),	\
			  ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
			     int _RET = (_lock);			\
			     if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
			     _t; }),					\
			  typeof_member(class_##_name##_t, lock) l)	\
	static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); }			\
	static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

/* Default binary condition; success on 'true' (_RET). */
#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock)			\
	DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)

#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
586
587 #endif /* _LINUX_CLEANUP_H */
588