/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Resilient Queued Spin Lock defines
 *
 * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
 *
 * Authors: Kumar Kartikeya Dwivedi <memxor@gmail.com>
 */
#ifndef __LINUX_RQSPINLOCK_H
#define __LINUX_RQSPINLOCK_H

#include "../locking/qspinlock.h"

/*
 * try_cmpxchg_tail - Return result of cmpxchg of tail word with a new value
 * @lock: Pointer to queued spinlock structure
 * @tail: The tail to compare against
 * @new_tail: The new queue tail code word
 * Return: Bool to indicate whether the cmpxchg operation succeeded
 *
 * This is used by the head of the wait queue to clean up the queue.
 * Provides relaxed ordering, since observers only rely on initialized
 * state of the node which was made visible through the xchg_tail operation,
 * i.e. through the smp_wmb preceding xchg_tail.
 *
 * We avoid using 16-bit cmpxchg, which is not available on all architectures.
 */
static __always_inline bool try_cmpxchg_tail(struct qspinlock *lock, u32 tail, u32 new_tail)
{
	u32 old, new;

	old = atomic_read(&lock->val);
	do {
		/*
		 * Is the tail part we compare to already stale? Fail.
		 */
		if ((old & _Q_TAIL_MASK) != tail)
			return false;
		/*
		 * Encode latest locked/pending state for new tail.
		 */
		new = (old & _Q_LOCKED_PENDING_MASK) | new_tail;
	} while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));

	return true;
}
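
/*
 * A minimal usage sketch (illustrative only, not part of this header's API):
 * a wait queue head that decides to bail out, e.g. on a timeout, can attempt
 * to unhook itself by resetting the tail to 0, provided it is still the last
 * queued node. The function name below is hypothetical; @tail is assumed to
 * be the tail code word this CPU previously published via xchg_tail().
 */
static __always_inline bool rqspinlock_sketch_dequeue_head(struct qspinlock *lock, u32 tail)
{
	/*
	 * Succeeds only if no successor has overwritten the tail since we
	 * queued; the locked/pending bits are preserved by try_cmpxchg_tail.
	 * On failure, a successor exists and the caller must hand over to it.
	 */
	return try_cmpxchg_tail(lock, tail, 0);
}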

#endif /* __LINUX_RQSPINLOCK_H */