/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FUTEX_H
#define _FUTEX_H

#include <linux/futex.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>

#ifdef CONFIG_PREEMPT_RT
#include <linux/rcuwait.h>
#endif

#include <asm/futex.h>

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SIZE_8		0x0000
#define FLAGS_SIZE_16		0x0001
#define FLAGS_SIZE_32		0x0002
#define FLAGS_SIZE_64		0x0003

#define FLAGS_SIZE_MASK		0x0003

#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x0010
#else
/*
 * NOMMU does not have a per-process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x0000
#endif
#define FLAGS_CLOCKRT		0x0020
#define FLAGS_HAS_TIMEOUT	0x0040
#define FLAGS_NUMA		0x0080
#define FLAGS_STRICT		0x0100
#define FLAGS_MPOL		0x0200

/* FUTEX_ to FLAGS_ */
static inline unsigned int futex_to_flags(unsigned int op)
{
	unsigned int flags = FLAGS_SIZE_32;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME)
		flags |= FLAGS_CLOCKRT;

	return flags;
}
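
/*
 * Illustrative example: a shared FUTEX_WAIT_BITSET op (FUTEX_PRIVATE_FLAG
 * clear) with FUTEX_CLOCK_REALTIME set maps to
 * FLAGS_SIZE_32 | FLAGS_SHARED | FLAGS_CLOCKRT.
 */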

#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_MPOL | FUTEX2_PRIVATE)

/* FUTEX2_ to FLAGS_ */
static inline unsigned int futex2_to_flags(unsigned int flags2)
{
	unsigned int flags = flags2 & FUTEX2_SIZE_MASK;

	if (!(flags2 & FUTEX2_PRIVATE))
		flags |= FLAGS_SHARED;

	if (flags2 & FUTEX2_NUMA)
		flags |= FLAGS_NUMA;

	if (flags2 & FUTEX2_MPOL)
		flags |= FLAGS_MPOL;

	return flags;
}
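
/*
 * Note: the FUTEX2_SIZE_* values line up with FLAGS_SIZE_* (0x0-0x3),
 * which is why the size bits can be copied through unchanged above.
 */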

static inline unsigned int futex_size(unsigned int flags)
{
	return 1 << (flags & FLAGS_SIZE_MASK);
}
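
/*
 * e.g. FLAGS_SIZE_32 (0x2) yields 1 << 2 = 4 bytes.
 */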

static inline bool futex_flags_valid(unsigned int flags)
{
	/* Only 64bit futexes for 64bit code */
	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) {
		if ((flags & FLAGS_SIZE_MASK) == FLAGS_SIZE_64)
			return false;
	}

	/* Only 32bit futexes are implemented -- for now */
	if ((flags & FLAGS_SIZE_MASK) != FLAGS_SIZE_32)
		return false;

	/*
	 * Must be able to represent both FUTEX_NO_NODE and every valid nodeid
	 * in a futex word; e.g. for a 32-bit futex, nr_node_ids must stay
	 * below ~0U, the all-ones pattern that encodes FUTEX_NO_NODE.
	 */
	if (flags & FLAGS_NUMA) {
		int bits = 8 * futex_size(flags);
		u64 max = ~0ULL;

		max >>= 64 - bits;
		if (nr_node_ids >= max)
			return false;
	}

	return true;
}
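
/*
 * Check that a user supplied value fits in the futex word; e.g. for a
 * 32-bit futex any @val with bits set above bit 31 is rejected.
 */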

static inline bool futex_validate_input(unsigned int flags, u64 val)
{
	int bits = 8 * futex_size(flags);

	if (bits < 64 && (val >> bits))
		return false;

	return true;
}

#ifdef CONFIG_FAIL_FUTEX
extern bool should_fail_futex(bool fshared);
#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex. Each bucket is cacheline aligned to keep independent
 * buckets from false sharing.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
	struct futex_private_hash *priv;
} ____cacheline_aligned_in_smp;

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex_base pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

struct futex_q;
typedef void (futex_wake_fn)(struct wake_q_head *wake_q, struct futex_q *q);

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @wake:		the wake handler for this queue
 * @wake_data:		data associated with the wake handler
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 * @requeue_state:	State field for futex_requeue_pi()
 * @drop_hb_ref:	Waiter should drop the extra hash bucket reference if true
 * @requeue_wait:	RCU wait for futex_requeue_pi() (RT only)
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See futex_unqueue_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	futex_wake_fn *wake;
	void *wake_data;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
	atomic_t requeue_state;
	bool drop_hb_ref;
#ifdef CONFIG_PREEMPT_RT
	struct rcuwait requeue_wait;
#endif
} __randomize_layout;

extern const struct futex_q futex_q_init;

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};
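
/*
 * Tells get_futex_key() whether the futex word is only read or also
 * written, which determines how much access to the backing mapping is
 * required.
 */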

extern int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
			 enum futex_access rw);
extern void futex_q_lockptr_lock(struct futex_q *q);
extern struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns);

extern struct futex_hash_bucket *futex_hash(union futex_key *key);
#ifdef CONFIG_FUTEX_PRIVATE_HASH
extern void futex_hash_get(struct futex_hash_bucket *hb);
extern void futex_hash_put(struct futex_hash_bucket *hb);

extern struct futex_private_hash *futex_private_hash(void);
extern void futex_private_hash_put(struct futex_private_hash *fph);

#else /* !CONFIG_FUTEX_PRIVATE_HASH */
static inline void futex_hash_get(struct futex_hash_bucket *hb) { }
static inline void futex_hash_put(struct futex_hash_bucket *hb) { }
static inline struct futex_private_hash *futex_private_hash(void) { return NULL; }
static inline void futex_private_hash_put(struct futex_private_hash *fph) { }
#endif

DEFINE_CLASS(hb, struct futex_hash_bucket *,
	     if (_T) futex_hash_put(_T),
	     futex_hash(key), union futex_key *key);

DEFINE_CLASS(private_hash, struct futex_private_hash *,
	     if (_T) futex_private_hash_put(_T),
	     futex_private_hash(), void);
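
/*
 * Usage sketch (illustrative), based on the CLASS() helper from
 * <linux/cleanup.h>:
 *
 *	CLASS(hb, bucket)(&key);
 *	...
 *	(futex_hash_put() runs automatically when @bucket leaves scope)
 */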

/**
 * futex_match - Check whether two futex keys are equal
 * @key1: Pointer to key1
 * @key2: Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int futex_match(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, union futex_key *key2,
			    struct task_struct *task);
extern void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout);
extern bool __futex_wake_mark(struct futex_q *q);
extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);

extern int fault_in_user_writeable(u32 __user *uaddr);
extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);
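
/*
 * cmpxchg the user space futex word with page faults disabled: callers
 * typically hold a hash bucket lock, so a fault cannot be handled here
 * and is reported as -EFAULT instead.
 */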

static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

/*
 * This does a plain atomic user space read, and the user pointer has
 * already been verified earlier by get_futex_key() to be both aligned
 * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
 *
 * We still want to avoid any speculation, and while __get_user() is
 * the traditional model for this, it's actually slower than doing
 * this manually these days.
 *
 * We could just have a per-architecture special function for it,
 * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
 * than force everybody to do that, write it out long-hand using
 * the low-level user-access infrastructure.
 *
 * This looks a bit overkill, but generally just results in a couple
 * of instructions.
 */
static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
{
	u32 val;

	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	*dest = val;
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
}
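
/*
 * The write-side counterpart of futex_get_value(), using the same
 * masked user access pattern for a plain atomic user space write.
 */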

static __always_inline int futex_put_value(u32 val, u32 __user *to)
{
	if (can_do_masked_user_access())
		to = masked_user_access_begin(to);
	else if (!user_write_access_begin(to, sizeof(*to)))
		return -EFAULT;
	unsafe_put_user(val, to, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}
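
/*
 * Pagefault-disabled read for contexts holding a hash bucket lock; on
 * -EFAULT the caller has to drop the lock and fault the page in before
 * retrying.
 */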

static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = futex_get_value(dest, from);
	pagefault_enable();

	return ret;
}

extern void __futex_unqueue(struct futex_q *q);
extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
			  struct task_struct *task);
extern int futex_unqueue(struct futex_q *q);

/**
 * futex_queue() - Enqueue the futex_q on the futex_hash_bucket
 * @q:		The futex_q to enqueue
 * @hb:		The destination hash bucket
 * @task:	Task queueing this futex
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * futex_queue() is typically paired with exactly one call to futex_unqueue().
 * The exceptions involve the PI related operations, which may use
 * futex_unqueue_pi() or nothing if the unqueue is done as part of the wake
 * process and the unqueue state is implicit in the state of the woken task
 * (see futex_wait_requeue_pi() for an example).
 *
 * Note that @task may be NULL, for async usage of futexes.
 */
static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
			       struct task_struct *task)
	__releases(&hb->lock)
{
	__futex_queue(q, hb, task);
	spin_unlock(&hb->lock);
}

extern void futex_unqueue_pi(struct futex_q *q);

extern void wait_for_owner_exiting(int ret, struct task_struct *exiting);

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void futex_hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void futex_hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}
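
/*
 * Used by the wake paths to avoid taking hb->lock when there is nobody
 * to wake; pairs with the barrier (A) in futex_hb_waiters_inc(). Without
 * SMP there is no cheap way to tell, so assume there are always waiters.
 */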

static inline int futex_hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	/*
	 * Full barrier (B), see the ordering comment above.
	 */
	smp_mb();
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

extern void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb);
extern void futex_q_unlock(struct futex_hash_bucket *hb);


extern int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters);

extern int refill_pi_state_cache(void);
extern void get_pi_state(struct futex_pi_state *pi_state);
extern void put_pi_state(struct futex_pi_state *pi_state);
extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);

/*
 * Express the locking dependencies for lockdep. Taking the two hash
 * bucket locks in address order ensures that concurrent
 * double_lock_hb() callers cannot deadlock on each other (ABBA).
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 > hb2)
		swap(hb1, hb2);

	spin_lock(&hb1->lock);
	if (hb1 != hb2)
		spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/* syscalls */

extern int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 val,
				 ktime_t *abs_time, u32 bitset, u32 __user *uaddr2);

extern int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
			 u32 __user *uaddr2, unsigned int flags2,
			 int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi);

extern int __futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
			struct hrtimer_sleeper *to, u32 bitset);

extern int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset);

/**
 * struct futex_vector - Auxiliary struct for futex_waitv()
 * @w: Userspace provided data
 * @q: Kernel side data
 *
 * Struct used to build an array with all the data needed for futex_waitv().
 */
struct futex_vector {
	struct futex_waitv w;
	struct futex_q q;
};

extern int futex_parse_waitv(struct futex_vector *futexv,
			     struct futex_waitv __user *uwaitv,
			     unsigned int nr_futexes, futex_wake_fn *wake,
			     void *wake_data);

extern int futex_wait_multiple_setup(struct futex_vector *vs, int count,
				     int *woken);

extern int futex_unqueue_multiple(struct futex_vector *v, int count);

extern int futex_wait_multiple(struct futex_vector *vs, unsigned int count,
			       struct hrtimer_sleeper *to);

extern int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset);

extern int futex_wake_op(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_wake2, int op);

extern int futex_unlock_pi(u32 __user *uaddr, unsigned int flags);

extern int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock);

#endif /* _FUTEX_H */