xref: /linux/kernel/futex/core.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fast Userspace Mutexes (which I call "Futexes!").
4  *  (C) Rusty Russell, IBM 2002
5  *
6  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8  *
9  *  Removed page pinning, fix privately mapped COW pages and other cleanups
10  *  (C) Copyright 2003, 2004 Jamie Lokier
11  *
12  *  Robust futex support started by Ingo Molnar
13  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15  *
16  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
17  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19  *
20  *  PRIVATE futexes by Eric Dumazet
21  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22  *
23  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24  *  Copyright (C) IBM Corporation, 2009
25  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
26  *
27  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28  *  enough at me, Linus for the original (flawed) idea, Matthew
29  *  Kirkwood for proof-of-concept implementation.
30  *
31  *  "The futexes are also cursed."
32  *  "But they come in a choice of three flavours!"
33  */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/debugfs.h>
38 #include <linux/plist.h>
39 #include <linux/gfp.h>
40 #include <linux/vmalloc.h>
41 #include <linux/memblock.h>
42 #include <linux/fault-inject.h>
43 #include <linux/slab.h>
44 #include <linux/prctl.h>
45 #include <linux/mempolicy.h>
46 #include <linux/mmap_lock.h>
47 
48 #include "futex.h"
49 #include "../locking/rtmutex_common.h"
50 
51 /*
52  * The base of the bucket array and its size are always used together
53  * (after initialization only in futex_hash()), so ensure that they
54  * reside in the same cacheline.
55  */
56 static struct {
57 	unsigned long            hashmask;
58 	unsigned int		 hashshift;
59 	struct futex_hash_bucket *queues[MAX_NUMNODES];
60 } __futex_data __read_mostly __aligned(2*sizeof(long));
61 
62 #define futex_hashmask	(__futex_data.hashmask)
63 #define futex_hashshift	(__futex_data.hashshift)
64 #define futex_queues	(__futex_data.queues)
65 
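/*
 * Editorial summary of the fields (derived from their use below):
 *
 * @state:	FR_PERCPU or FR_ATOMIC, selecting which counting scheme
 *		futex_ref_{get,put}() currently use; see "futex-ref" below.
 * @hash_mask:	number of buckets - 1; 0 means there are no private buckets
 *		and lookups fall back to the global table, see
 *		__futex_hash_private().
 * @rcu:	used to free a replaced hash via kvfree_rcu().
 * @mm:		the owning mm_struct.
 * @custom:	true if the size was explicitly requested (FH_CUSTOM),
 *		e.g. via prctl(), rather than auto-sized.
 * @queues:	the hash buckets themselves.
 */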
66 struct futex_private_hash {
67 	int		state;
68 	unsigned int	hash_mask;
69 	struct rcu_head	rcu;
70 	void		*mm;
71 	bool		custom;
72 	struct futex_hash_bucket queues[];
73 };
74 
75 /*
76  * Fault injections for futexes.
77  */
78 #ifdef CONFIG_FAIL_FUTEX
79 
80 static struct {
81 	struct fault_attr attr;
82 
83 	bool ignore_private;
84 } fail_futex = {
85 	.attr = FAULT_ATTR_INITIALIZER,
86 	.ignore_private = false,
87 };
88 
89 static int __init setup_fail_futex(char *str)
90 {
91 	return setup_fault_attr(&fail_futex.attr, str);
92 }
93 __setup("fail_futex=", setup_fail_futex);
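/*
 * Usage note (editorial; Documentation/fault-injection/ is authoritative):
 * like other fault_attr users this can be configured on the kernel command
 * line, roughly
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *
 * and, with CONFIG_FAULT_INJECTION_DEBUG_FS, tuned at runtime below
 * /sys/kernel/debug/fail_futex/, including the "ignore-private" knob
 * created in fail_futex_debugfs().
 */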
94 
95 bool should_fail_futex(bool fshared)
96 {
97 	if (fail_futex.ignore_private && !fshared)
98 		return false;
99 
100 	return should_fail(&fail_futex.attr, 1);
101 }
102 
103 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
104 
105 static int __init fail_futex_debugfs(void)
106 {
107 	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
108 	struct dentry *dir;
109 
110 	dir = fault_create_debugfs_attr("fail_futex", NULL,
111 					&fail_futex.attr);
112 	if (IS_ERR(dir))
113 		return PTR_ERR(dir);
114 
115 	debugfs_create_bool("ignore-private", mode, dir,
116 			    &fail_futex.ignore_private);
117 	return 0;
118 }
119 
120 late_initcall(fail_futex_debugfs);
121 
122 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
123 
124 #endif /* CONFIG_FAIL_FUTEX */
125 
126 static struct futex_hash_bucket *
127 __futex_hash(union futex_key *key, struct futex_private_hash *fph);
128 
129 #ifdef CONFIG_FUTEX_PRIVATE_HASH
130 static bool futex_ref_get(struct futex_private_hash *fph);
131 static bool futex_ref_put(struct futex_private_hash *fph);
132 static bool futex_ref_is_dead(struct futex_private_hash *fph);
133 
134 enum { FR_PERCPU = 0, FR_ATOMIC };
135 
136 static inline bool futex_key_is_private(union futex_key *key)
137 {
138 	/*
139 	 * Relies on get_futex_key() to set either bit for shared
140 	 * futexes -- see comment with union futex_key.
141 	 */
142 	return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
143 }
144 
145 static bool futex_private_hash_get(struct futex_private_hash *fph)
146 {
147 	return futex_ref_get(fph);
148 }
149 
150 void futex_private_hash_put(struct futex_private_hash *fph)
151 {
152 	if (futex_ref_put(fph))
153 		wake_up_var(fph->mm);
154 }
155 
156 /**
157  * futex_hash_get - Get an additional reference for the local hash.
158  * @hb:                    ptr to the private local hash.
159  *
160  * Obtain an additional reference for the already obtained hash bucket. The
161  * caller must already own a reference.
162  */
163 void futex_hash_get(struct futex_hash_bucket *hb)
164 {
165 	struct futex_private_hash *fph = hb->priv;
166 
167 	if (!fph)
168 		return;
169 	WARN_ON_ONCE(!futex_private_hash_get(fph));
170 }
171 
172 void futex_hash_put(struct futex_hash_bucket *hb)
173 {
174 	struct futex_private_hash *fph = hb->priv;
175 
176 	if (!fph)
177 		return;
178 	futex_private_hash_put(fph);
179 }
180 
181 static struct futex_hash_bucket *
182 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
183 {
184 	u32 hash;
185 
186 	if (!futex_key_is_private(key))
187 		return NULL;
188 
189 	if (!fph)
190 		fph = rcu_dereference(key->private.mm->futex_phash);
191 	if (!fph || !fph->hash_mask)
192 		return NULL;
193 
194 	hash = jhash2((void *)&key->private.address,
195 		      sizeof(key->private.address) / 4,
196 		      key->both.offset);
197 	return &fph->queues[hash & fph->hash_mask];
198 }
199 
200 static void futex_rehash_private(struct futex_private_hash *old,
201 				 struct futex_private_hash *new)
202 {
203 	struct futex_hash_bucket *hb_old, *hb_new;
204 	unsigned int slots = old->hash_mask + 1;
205 	unsigned int i;
206 
207 	for (i = 0; i < slots; i++) {
208 		struct futex_q *this, *tmp;
209 
210 		hb_old = &old->queues[i];
211 
212 		spin_lock(&hb_old->lock);
213 		plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) {
214 
215 			plist_del(&this->list, &hb_old->chain);
216 			futex_hb_waiters_dec(hb_old);
217 
218 			WARN_ON_ONCE(this->lock_ptr != &hb_old->lock);
219 
220 			hb_new = __futex_hash(&this->key, new);
221 			futex_hb_waiters_inc(hb_new);
222 			/*
223 			 * The new pointer isn't published yet but an already
224 			 * moved user can be unqueued due to timeout or signal.
225 			 */
226 			spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
227 			plist_add(&this->list, &hb_new->chain);
228 			this->lock_ptr = &hb_new->lock;
229 			spin_unlock(&hb_new->lock);
230 		}
231 		spin_unlock(&hb_old->lock);
232 	}
233 }
234 
235 static bool __futex_pivot_hash(struct mm_struct *mm,
236 			       struct futex_private_hash *new)
237 {
238 	struct futex_private_hash *fph;
239 
240 	WARN_ON_ONCE(mm->futex_phash_new);
241 
242 	fph = rcu_dereference_protected(mm->futex_phash,
243 					lockdep_is_held(&mm->futex_hash_lock));
244 	if (fph) {
245 		if (!futex_ref_is_dead(fph)) {
246 			mm->futex_phash_new = new;
247 			return false;
248 		}
249 
250 		futex_rehash_private(fph, new);
251 	}
252 	new->state = FR_PERCPU;
253 	scoped_guard(rcu) {
254 		mm->futex_batches = get_state_synchronize_rcu();
255 		rcu_assign_pointer(mm->futex_phash, new);
256 	}
257 	kvfree_rcu(fph, rcu);
258 	return true;
259 }
260 
261 static void futex_pivot_hash(struct mm_struct *mm)
262 {
263 	scoped_guard(mutex, &mm->futex_hash_lock) {
264 		struct futex_private_hash *fph;
265 
266 		fph = mm->futex_phash_new;
267 		if (fph) {
268 			mm->futex_phash_new = NULL;
269 			__futex_pivot_hash(mm, fph);
270 		}
271 	}
272 }
273 
274 struct futex_private_hash *futex_private_hash(void)
275 {
276 	struct mm_struct *mm = current->mm;
277 	/*
278 	 * Ideally we don't loop. If there is a replacement in progress
279 	 * then a new private hash is already prepared and a reference can't be
280 	 * obtained once the last user dropped theirs.
281 	 * In that case we block on mm_struct::futex_hash_lock and either have
282 	 * to perform the replacement or wait while someone else is doing the
283 	 * job. Either way, on the second iteration we acquire a reference on the
284 	 * new private hash or loop again because a new replacement has been
285 	 * requested.
286 	 */
287 again:
288 	scoped_guard(rcu) {
289 		struct futex_private_hash *fph;
290 
291 		fph = rcu_dereference(mm->futex_phash);
292 		if (!fph)
293 			return NULL;
294 
295 		if (futex_private_hash_get(fph))
296 			return fph;
297 	}
298 	futex_pivot_hash(mm);
299 	goto again;
300 }
301 
302 struct futex_hash_bucket *futex_hash(union futex_key *key)
303 {
304 	struct futex_private_hash *fph;
305 	struct futex_hash_bucket *hb;
306 
307 again:
308 	scoped_guard(rcu) {
309 		hb = __futex_hash(key, NULL);
310 		fph = hb->priv;
311 
312 		if (!fph || futex_private_hash_get(fph))
313 			return hb;
314 	}
315 	futex_pivot_hash(key->private.mm);
316 	goto again;
317 }
318 
319 #else /* !CONFIG_FUTEX_PRIVATE_HASH */
320 
321 static struct futex_hash_bucket *
322 __futex_hash_private(union futex_key *key, struct futex_private_hash *fph)
323 {
324 	return NULL;
325 }
326 
327 struct futex_hash_bucket *futex_hash(union futex_key *key)
328 {
329 	return __futex_hash(key, NULL);
330 }
331 
332 #endif /* CONFIG_FUTEX_PRIVATE_HASH */
333 
334 #ifdef CONFIG_FUTEX_MPOL
335 
336 static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr)
337 {
338 	struct vm_area_struct *vma = vma_lookup(mm, addr);
339 	struct mempolicy *mpol;
340 	int node = FUTEX_NO_NODE;
341 
342 	if (!vma)
343 		return FUTEX_NO_NODE;
344 
345 	mpol = vma_policy(vma);
346 	if (!mpol)
347 		return FUTEX_NO_NODE;
348 
349 	switch (mpol->mode) {
350 	case MPOL_PREFERRED:
351 		node = first_node(mpol->nodes);
352 		break;
353 	case MPOL_PREFERRED_MANY:
354 	case MPOL_BIND:
355 		if (mpol->home_node != NUMA_NO_NODE)
356 			node = mpol->home_node;
357 		break;
358 	default:
359 		break;
360 	}
361 
362 	return node;
363 }
364 
365 static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr)
366 {
367 	int seq, node;
368 
369 	guard(rcu)();
370 
371 	if (!mmap_lock_speculate_try_begin(mm, &seq))
372 		return -EBUSY;
373 
374 	node = __futex_key_to_node(mm, addr);
375 
376 	if (mmap_lock_speculate_retry(mm, seq))
377 		return -EAGAIN;
378 
379 	return node;
380 }
381 
382 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
383 {
384 	int node;
385 
386 	node = futex_key_to_node_opt(mm, addr);
387 	if (node >= FUTEX_NO_NODE)
388 		return node;
389 
390 	guard(mmap_read_lock)(mm);
391 	return __futex_key_to_node(mm, addr);
392 }
393 
394 #else /* !CONFIG_FUTEX_MPOL */
395 
396 static int futex_mpol(struct mm_struct *mm, unsigned long addr)
397 {
398 	return FUTEX_NO_NODE;
399 }
400 
401 #endif /* CONFIG_FUTEX_MPOL */
402 
403 /**
404  * __futex_hash - Return the hash bucket
405  * @key:	Pointer to the futex key for which the hash is calculated
406  * @fph:	Pointer to private hash if known
407  *
408  * We hash on the keys returned from get_futex_key (see below) and return the
409  * corresponding hash bucket.
410  * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
411  * private hash) is returned if existing. Otherwise a hash bucket from the
412  * global hash is returned.
413  */
414 static struct futex_hash_bucket *
415 __futex_hash(union futex_key *key, struct futex_private_hash *fph)
416 {
417 	int node = key->both.node;
418 	u32 hash;
419 
420 	if (node == FUTEX_NO_NODE) {
421 		struct futex_hash_bucket *hb;
422 
423 		hb = __futex_hash_private(key, fph);
424 		if (hb)
425 			return hb;
426 	}
427 
428 	hash = jhash2((u32 *)key,
429 		      offsetof(typeof(*key), both.offset) / sizeof(u32),
430 		      key->both.offset);
431 
432 	if (node == FUTEX_NO_NODE) {
433 		/*
434 		 * In case of !FLAGS_NUMA, use some unused hash bits to pick a
435 		 * node -- this ensures regular futexes are interleaved across
436 		 * the nodes and avoids having to allocate multiple
437 		 * hash-tables.
438 		 *
439 		 * NOTE: this isn't perfectly uniform, but it is fast and
440 		 * handles sparse node masks.
441 		 */
442 		node = (hash >> futex_hashshift) % nr_node_ids;
443 		if (!node_possible(node)) {
444 			node = find_next_bit_wrap(node_possible_map.bits,
445 						  nr_node_ids, node);
446 		}
447 	}
448 
449 	return &futex_queues[node][hash & futex_hashmask];
450 }
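/*
 * Editorial example with made-up numbers: for a !FLAGS_NUMA key whose
 * jhash2() value is 0x12345678, with futex_hashshift == 9 and
 * nr_node_ids == 4, the node picked above is (0x12345678 >> 9) % 4 == 3
 * (adjusted by find_next_bit_wrap() if node 3 is not possible), and the
 * bucket is futex_queues[3][0x12345678 & futex_hashmask].
 */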
451 
452 /**
453  * futex_setup_timer - set up the sleeping hrtimer.
454  * @time:	ptr to the given timeout value
455  * @timeout:	the hrtimer_sleeper structure to be set up
456  * @flags:	futex flags
457  * @range_ns:	optional range in ns
458  *
459  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
460  *	   value given
461  */
462 struct hrtimer_sleeper *
463 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
464 		  int flags, u64 range_ns)
465 {
466 	if (!time)
467 		return NULL;
468 
469 	hrtimer_setup_sleeper_on_stack(timeout,
470 				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
471 				       HRTIMER_MODE_ABS);
472 	/*
473 	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
474 	 * effectively the same as calling hrtimer_set_expires().
475 	 */
476 	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
477 
478 	return timeout;
479 }
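/*
 * Sketch of the typical caller pattern (cf. futex_wait(); simplified,
 * editorial):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	... block on the futex ...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */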
480 
481 /*
482  * Generate a machine wide unique identifier for this inode.
483  *
484  * This relies on u64 not wrapping in the life-time of the machine; which with
485  * 1ns resolution means almost 585 years.
486  *
487  * This further relies on the fact that a well formed program will not unmap
488  * the file while it has a (shared) futex waiting on it. This mapping will have
489  * a file reference which pins the mount and inode.
490  *
491  * If for some reason an inode gets evicted and read back in again, it will get
492  * a new sequence number and will _NOT_ match, even though it is the exact same
493  * file.
494  *
495  * It is important that futex_match() will never have a false-positive, especially
496  * for PI futexes, where that can mess up the state. The above argues that false-negatives
497  * are only possible for malformed programs.
498  */
499 static u64 get_inode_sequence_number(struct inode *inode)
500 {
501 	static atomic64_t i_seq;
502 	u64 old;
503 
504 	/* Does the inode already have a sequence number? */
505 	old = atomic64_read(&inode->i_sequence);
506 	if (likely(old))
507 		return old;
508 
509 	for (;;) {
510 		u64 new = atomic64_inc_return(&i_seq);
511 		if (WARN_ON_ONCE(!new))
512 			continue;
513 
514 		old = 0;
515 		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
516 			return old;
517 		return new;
518 	}
519 }
520 
521 /**
522  * get_futex_key() - Get parameters which are the keys for a futex
523  * @uaddr:	virtual address of the futex
524  * @flags:	FLAGS_*
525  * @key:	address where result is stored.
526  * @rw:		mapping needs to be read/write (values: FUTEX_READ,
527  *              FUTEX_WRITE)
528  *
529  * Return: a negative error code or 0
530  *
531  * The key words are stored in @key on success.
532  *
533  * For shared mappings (when @fshared), the key is:
534  *
535  *   ( inode->i_sequence, page offset within mapping, offset_within_page )
536  *
537  * [ also see get_inode_sequence_number() ]
538  *
539  * For private mappings (or when !@fshared), the key is:
540  *
541  *   ( current->mm, address, 0 )
542  *
543  * This allows (cross process, where applicable) identification of the futex
544  * without keeping the page pinned for the duration of the FUTEX_WAIT.
545  *
546  * lock_page() might sleep, the caller should not hold a spinlock.
547  */
548 int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
549 		  enum futex_access rw)
550 {
551 	unsigned long address = (unsigned long)uaddr;
552 	struct mm_struct *mm = current->mm;
553 	struct page *page;
554 	struct folio *folio;
555 	struct address_space *mapping;
556 	int node, err, size, ro = 0;
557 	bool node_updated = false;
558 	bool fshared;
559 
560 	fshared = flags & FLAGS_SHARED;
561 	size = futex_size(flags);
562 	if (flags & FLAGS_NUMA)
563 		size *= 2;
564 
565 	/*
566 	 * The futex address must be "naturally" aligned.
567 	 */
568 	key->both.offset = address % PAGE_SIZE;
569 	if (unlikely((address % size) != 0))
570 		return -EINVAL;
571 	address -= key->both.offset;
572 
573 	if (unlikely(!access_ok(uaddr, size)))
574 		return -EFAULT;
575 
576 	if (unlikely(should_fail_futex(fshared)))
577 		return -EFAULT;
578 
579 	node = FUTEX_NO_NODE;
580 
581 	if (flags & FLAGS_NUMA) {
582 		u32 __user *naddr = (void *)uaddr + size / 2;
583 
584 		if (futex_get_value(&node, naddr))
585 			return -EFAULT;
586 
587 		if ((node != FUTEX_NO_NODE) &&
588 		    ((unsigned int)node >= MAX_NUMNODES || !node_possible(node)))
589 			return -EINVAL;
590 	}
591 
592 	if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) {
593 		node = futex_mpol(mm, address);
594 		node_updated = true;
595 	}
596 
597 	if (flags & FLAGS_NUMA) {
598 		u32 __user *naddr = (void *)uaddr + size / 2;
599 
600 		if (node == FUTEX_NO_NODE) {
601 			node = numa_node_id();
602 			node_updated = true;
603 		}
604 		if (node_updated && futex_put_value(node, naddr))
605 			return -EFAULT;
606 	}
607 
608 	key->both.node = node;
609 
610 	/*
611 	 * PROCESS_PRIVATE futexes are fast.
612 	 * As the mm cannot disappear under us and the 'key' only needs
613 	 * virtual address, we don't even have to find the underlying vma.
614 	 * Note: We do have to check 'uaddr' is a valid user address,
615 	 *       but access_ok() should be faster than find_vma().
616 	 */
617 	if (!fshared) {
618 		/*
619 		 * On no-MMU, shared futexes are treated as private, therefore
620 		 * we must not include the current process in the key. Since
621 		 * there is only one address space, the address is a unique key
622 		 * on its own.
623 		 */
624 		if (IS_ENABLED(CONFIG_MMU))
625 			key->private.mm = mm;
626 		else
627 			key->private.mm = NULL;
628 
629 		key->private.address = address;
630 		return 0;
631 	}
632 
633 again:
634 	/* Ignore any VERIFY_READ mapping (futex common case) */
635 	if (unlikely(should_fail_futex(true)))
636 		return -EFAULT;
637 
638 	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
639 	/*
640 	 * If write access is not required (eg. FUTEX_WAIT), try
641 	 * and get read-only access.
642 	 */
643 	if (err == -EFAULT && rw == FUTEX_READ) {
644 		err = get_user_pages_fast(address, 1, 0, &page);
645 		ro = 1;
646 	}
647 	if (err < 0)
648 		return err;
649 	else
650 		err = 0;
651 
652 	/*
653 	 * The treatment of mapping from this point on is critical. The folio
654 	 * lock protects many things but in this context the folio lock
655 	 * stabilizes mapping, prevents inode freeing in the shared
656 	 * file-backed region case and guards against movement to swap cache.
657 	 *
658 	 * Strictly speaking the folio lock is not needed in all cases being
659 	 * considered here and the folio lock forces unnecessary serialization.
660 	 * From this point on, mapping will be re-verified if necessary and
661 	 * the folio lock will be acquired only if it is unavoidable.
662 	 *
663 	 * Mapping checks require the folio so it is looked up now. For
664 	 * anonymous pages, it does not matter if the folio is split
665 	 * in the future as the key is based on the address. For
666 	 * filesystem-backed pages, the precise page is required as the
667 	 * index of the page determines the key.
668 	 */
669 	folio = page_folio(page);
670 	mapping = READ_ONCE(folio->mapping);
671 
672 	/*
673 	 * If folio->mapping is NULL, then it cannot be an anonymous
674 	 * page; but it might be the ZERO_PAGE or in the gate area or
675 	 * in a special mapping (all cases which we are happy to fail);
676 	 * or it may have been a good file page when get_user_pages_fast
677 	 * found it, but truncated or holepunched or subjected to
678 	 * invalidate_complete_page2 before we got the folio lock (also
679 	 * cases which we are happy to fail).  And we hold a reference,
680 	 * so refcount care in invalidate_inode_page's remove_mapping
681 	 * prevents drop_caches from setting mapping to NULL beneath us.
682 	 *
683 	 * The case we do have to guard against is when memory pressure made
684 	 * shmem_writepage move it from filecache to swapcache beneath us:
685 	 * an unlikely race, but we do need to retry for folio->mapping.
686 	 */
687 	if (unlikely(!mapping)) {
688 		int shmem_swizzled;
689 
690 		/*
691 		 * Folio lock is required to identify which special case above
692 		 * applies. If this is really a shmem page then the folio lock
693 		 * will prevent unexpected transitions.
694 		 */
695 		folio_lock(folio);
696 		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
697 		folio_unlock(folio);
698 		folio_put(folio);
699 
700 		if (shmem_swizzled)
701 			goto again;
702 
703 		return -EFAULT;
704 	}
705 
706 	/*
707 	 * Private mappings are handled in a simple way.
708 	 *
709 	 * If the futex key is stored in anonymous memory, then the associated
710 	 * object is the mm which is implicitly pinned by the calling process.
711 	 *
712 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
713 	 * it's a read-only handle, it's expected that futexes attach to
714 	 * the object not the particular process.
715 	 */
716 	if (folio_test_anon(folio)) {
717 		/*
718 		 * A RO anonymous page will never change and thus doesn't make
719 		 * sense for futex operations.
720 		 */
721 		if (unlikely(should_fail_futex(true)) || ro) {
722 			err = -EFAULT;
723 			goto out;
724 		}
725 
726 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
727 		key->private.mm = mm;
728 		key->private.address = address;
729 
730 	} else {
731 		struct inode *inode;
732 
733 		/*
734 		 * The associated futex object in this case is the inode and
735 		 * the folio->mapping must be traversed. Ordinarily this should
736 		 * be stabilised under folio lock but it's not strictly
737 		 * necessary in this case as we just want to pin the inode, not
738 		 * update i_pages or anything like that.
739 		 *
740 		 * The RCU read lock is taken as the inode is finally freed
741 		 * under RCU. If the mapping still matches expectations then the
742 		 * mapping->host can be safely accessed as being a valid inode.
743 		 */
744 		rcu_read_lock();
745 
746 		if (READ_ONCE(folio->mapping) != mapping) {
747 			rcu_read_unlock();
748 			folio_put(folio);
749 
750 			goto again;
751 		}
752 
753 		inode = READ_ONCE(mapping->host);
754 		if (!inode) {
755 			rcu_read_unlock();
756 			folio_put(folio);
757 
758 			goto again;
759 		}
760 
761 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
762 		key->shared.i_seq = get_inode_sequence_number(inode);
763 		key->shared.pgoff = page_pgoff(folio, page);
764 		rcu_read_unlock();
765 	}
766 
767 out:
768 	folio_put(folio);
769 	return err;
770 }
771 
772 /**
773  * fault_in_user_writeable() - Fault in user address and verify RW access
774  * @uaddr:	pointer to faulting user space address
775  *
776  * Slow path to fixup the fault we just took in the atomic write
777  * access to @uaddr.
778  *
779  * We have no generic implementation of a non-destructive write to the
780  * user address. We know that we faulted in the atomic pagefault
781  * disabled section so we can as well avoid the #PF overhead by
782  * calling get_user_pages() right away.
783  */
784 int fault_in_user_writeable(u32 __user *uaddr)
785 {
786 	struct mm_struct *mm = current->mm;
787 	int ret;
788 
789 	mmap_read_lock(mm);
790 	ret = fixup_user_fault(mm, (unsigned long)uaddr,
791 			       FAULT_FLAG_WRITE, NULL);
792 	mmap_read_unlock(mm);
793 
794 	return ret < 0 ? ret : 0;
795 }
796 
797 /**
798  * futex_top_waiter() - Return the highest priority waiter on a futex
799  * @hb:		the hash bucket the futex_q's reside in
800  * @key:	the futex key (to distinguish it from other futex futex_q's)
801  *
802  * Must be called with the hb lock held.
803  */
804 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
805 {
806 	struct futex_q *this;
807 
808 	plist_for_each_entry(this, &hb->chain, list) {
809 		if (futex_match(&this->key, key))
810 			return this;
811 	}
812 	return NULL;
813 }
814 
815 /**
816  * wait_for_owner_exiting - Block until the owner has exited
817  * @ret: owner's current futex lock status
818  * @exiting:	Pointer to the exiting task
819  *
820  * Caller must hold a refcount on @exiting.
821  */
822 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
823 {
824 	if (ret != -EBUSY) {
825 		WARN_ON_ONCE(exiting);
826 		return;
827 	}
828 
829 	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
830 		return;
831 
832 	mutex_lock(&exiting->futex_exit_mutex);
833 	/*
834 	 * No point in doing state checking here. If the waiter got here
835 	 * while the task was in exec()->exec_futex_release() then it can
836 	 * have any FUTEX_STATE_* value when the waiter has acquired the
837 	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
838 	 * mutex: OK if it is still running, EXITING or DEAD if it reached exit()
839 	 * through the futex maze.
840 	 */
841 	mutex_unlock(&exiting->futex_exit_mutex);
842 
843 	put_task_struct(exiting);
844 }
845 
846 /**
847  * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
848  * @q:	The futex_q to unqueue
849  *
850  * The q->lock_ptr must not be NULL and must be held by the caller.
851  */
852 void __futex_unqueue(struct futex_q *q)
853 {
854 	struct futex_hash_bucket *hb;
855 
856 	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
857 		return;
858 	lockdep_assert_held(q->lock_ptr);
859 
860 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
861 	plist_del(&q->list, &hb->chain);
862 	futex_hb_waiters_dec(hb);
863 }
864 
865 /* The key must be already stored in q->key. */
866 void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
867 	__acquires(&hb->lock)
868 {
869 	/*
870 	 * Increment the counter before taking the lock so that
871 	 * a potential waker won't miss a to-be-slept task that is
872 	 * waiting for the spinlock. This is safe as all futex_q_lock()
873 	 * users end up calling futex_queue(). Similarly, for housekeeping,
874 	 * decrement the counter at futex_q_unlock() when some error has
875 	 * occurred and we don't end up adding the task to the list.
876 	 */
877 	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
878 
879 	q->lock_ptr = &hb->lock;
880 
881 	spin_lock(&hb->lock);
882 }
883 
884 void futex_q_unlock(struct futex_hash_bucket *hb)
885 	__releases(&hb->lock)
886 {
887 	futex_hb_waiters_dec(hb);
888 	spin_unlock(&hb->lock);
889 }
890 
891 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
892 		   struct task_struct *task)
893 {
894 	int prio;
895 
896 	/*
897 	 * The priority used to register this element is
898 	 * - either the real thread-priority for the real-time threads
899 	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
900 	 * - or MAX_RT_PRIO for non-RT threads.
901 	 * Thus, all RT-threads are woken first in priority order, and
902 	 * the others are woken last, in FIFO order.
903 	 */
904 	prio = min(current->normal_prio, MAX_RT_PRIO);
905 
906 	plist_node_init(&q->list, prio);
907 	plist_add(&q->list, &hb->chain);
908 	q->task = task;
909 }
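/*
 * Editorial example: a SCHED_FIFO waiter with normal_prio 10 is queued at
 * plist priority 10 and is woken before any SCHED_OTHER waiter, because
 * every non-RT waiter (normal_prio >= MAX_RT_PRIO) is clamped to
 * MAX_RT_PRIO and those wake among themselves in FIFO order.
 */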
910 
911 /**
912  * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
913  * @q:	The futex_q to unqueue
914  *
915  * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
916  * be paired with exactly one earlier call to futex_queue().
917  *
918  * Return:
919  *  - 1 - if the futex_q was still queued (and we unqueued it);
920  *  - 0 - if the futex_q was already removed by the waking thread
921  */
922 int futex_unqueue(struct futex_q *q)
923 {
924 	spinlock_t *lock_ptr;
925 	int ret = 0;
926 
927 	/* RCU so lock_ptr is not going away during locking. */
928 	guard(rcu)();
929 	/* In the common case we don't take the spinlock, which is nice. */
930 retry:
931 	/*
932 	 * q->lock_ptr can change between this read and the following spin_lock.
933 	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
934 	 * optimizing lock_ptr out of the logic below.
935 	 */
936 	lock_ptr = READ_ONCE(q->lock_ptr);
937 	if (lock_ptr != NULL) {
938 		spin_lock(lock_ptr);
939 		/*
940 		 * q->lock_ptr can change between reading it and
941 		 * spin_lock(), causing us to take the wrong lock.  This
942 		 * corrects the race condition.
943 		 *
944 		 * Reasoning goes like this: if we have the wrong lock,
945 		 * q->lock_ptr must have changed (maybe several times)
946 		 * between reading it and the spin_lock().  It can
947 		 * change again after the spin_lock() but only if it was
948 		 * already changed before the spin_lock().  It cannot,
949 		 * however, change back to the original value.  Therefore
950 		 * we can detect whether we acquired the correct lock.
951 		 */
952 		if (unlikely(lock_ptr != q->lock_ptr)) {
953 			spin_unlock(lock_ptr);
954 			goto retry;
955 		}
956 		__futex_unqueue(q);
957 
958 		BUG_ON(q->pi_state);
959 
960 		spin_unlock(lock_ptr);
961 		ret = 1;
962 	}
963 
964 	return ret;
965 }
966 
967 void futex_q_lockptr_lock(struct futex_q *q)
968 {
969 	spinlock_t *lock_ptr;
970 
971 	/*
972 	 * See futex_unqueue() why lock_ptr can change.
973 	 */
974 	guard(rcu)();
975 retry:
976 	lock_ptr = READ_ONCE(q->lock_ptr);
977 	spin_lock(lock_ptr);
978 
979 	if (unlikely(lock_ptr != q->lock_ptr)) {
980 		spin_unlock(lock_ptr);
981 		goto retry;
982 	}
983 }
984 
985 /*
986  * PI futexes can not be requeued and must remove themselves from the hash
987  * bucket. The hash bucket lock (i.e. lock_ptr) is held.
988  */
989 void futex_unqueue_pi(struct futex_q *q)
990 {
991 	/*
992 	 * If the lock was not acquired (due to timeout or signal) then the
993 	 * rt_waiter is removed before futex_q is. If this is observed by
994 	 * an unlocker after dropping the rtmutex wait lock and before
995 	 * acquiring the hash bucket lock, then the unlocker dequeues the
996 	 * futex_q from the hash bucket list to guarantee consistent state
997 	 * vs. userspace. Therefore the dequeue here must be conditional.
998 	 */
999 	if (!plist_node_empty(&q->list))
1000 		__futex_unqueue(q);
1001 
1002 	BUG_ON(!q->pi_state);
1003 	put_pi_state(q->pi_state);
1004 	q->pi_state = NULL;
1005 }
1006 
1007 /* Constants for the pending_op argument of handle_futex_death */
1008 #define HANDLE_DEATH_PENDING	true
1009 #define HANDLE_DEATH_LIST	false
1010 
1011 /*
1012  * Process a futex-list entry, check whether it's owned by the
1013  * dying task, and do notification if so:
1014  */
1015 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
1016 			      bool pi, bool pending_op)
1017 {
1018 	u32 uval, nval, mval;
1019 	pid_t owner;
1020 	int err;
1021 
1022 	/* Futex address must be 32bit aligned */
1023 	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
1024 		return -1;
1025 
1026 retry:
1027 	if (get_user(uval, uaddr))
1028 		return -1;
1029 
1030 	/*
1031 	 * Special case for regular (non PI) futexes. The unlock path in
1032 	 * user space has two race scenarios:
1033 	 *
1034 	 * 1. The unlock path releases the user space futex value and
1035 	 *    before it can execute the futex() syscall to wake up
1036 	 *    waiters it is killed.
1037 	 *
1038 	 * 2. A woken up waiter is killed before it can acquire the
1039 	 *    futex in user space.
1040 	 *
1041 	 * In the second case, the wake up notification could be generated
1042 	 * by the unlock path in user space after setting the futex value
1043 	 * to zero or by the kernel after setting the OWNER_DIED bit below.
1044 	 *
1045 	 * In both cases the TID validation below prevents a wakeup of
1046 	 * potential waiters which can cause these waiters to block
1047 	 * forever.
1048 	 *
1049 	 * In both cases the following conditions are met:
1050 	 *
1051 	 *	1) task->robust_list->list_op_pending != NULL
1052 	 *	   @pending_op == true
1053 	 *	2) The owner part of user space futex value == 0
1054 	 *	3) Regular futex: @pi == false
1055 	 *
1056 	 * If these conditions are met, it is safe to attempt waking up a
1057 	 * potential waiter without touching the user space futex value and
1058 	 * trying to set the OWNER_DIED bit. If the futex value is zero,
1059 	 * the rest of the user space mutex state is consistent, so a woken
1060 	 * waiter will just take over the uncontended futex. Setting the
1061 	 * OWNER_DIED bit would create inconsistent state and malfunction
1062 	 * of the user space owner died handling. Otherwise, the OWNER_DIED
1063 	 * bit is already set, and the woken waiter is expected to deal with
1064 	 * this.
1065 	 */
1066 	owner = uval & FUTEX_TID_MASK;
1067 
1068 	if (pending_op && !pi && !owner) {
1069 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1070 			   FUTEX_BITSET_MATCH_ANY);
1071 		return 0;
1072 	}
1073 
1074 	if (owner != task_pid_vnr(curr))
1075 		return 0;
1076 
1077 	/*
1078 	 * Ok, this dying thread is truly holding a futex
1079 	 * of interest. Set the OWNER_DIED bit atomically
1080 	 * via cmpxchg, and if the value had FUTEX_WAITERS
1081 	 * set, wake up a waiter (if any). (We have to do a
1082 	 * futex_wake() even if OWNER_DIED is already set -
1083 	 * to handle the rare but possible case of recursive
1084 	 * thread-death.) The rest of the cleanup is done in
1085 	 * userspace.
1086 	 */
1087 	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1088 
1089 	/*
1090 	 * We are not holding a lock here, but we want to have
1091 	 * the pagefault_disable/enable() protection because
1092 	 * we want to handle the fault gracefully. If the
1093 	 * access fails we try to fault in the futex with R/W
1094 	 * verification via get_user_pages. get_user() above
1095 	 * does not guarantee R/W access. If that fails we
1096 	 * give up and leave the futex locked.
1097 	 */
1098 	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
1099 		switch (err) {
1100 		case -EFAULT:
1101 			if (fault_in_user_writeable(uaddr))
1102 				return -1;
1103 			goto retry;
1104 
1105 		case -EAGAIN:
1106 			cond_resched();
1107 			goto retry;
1108 
1109 		default:
1110 			WARN_ON_ONCE(1);
1111 			return err;
1112 		}
1113 	}
1114 
1115 	if (nval != uval)
1116 		goto retry;
1117 
1118 	/*
1119 	 * Wake robust non-PI futexes here. The wakeup of
1120 	 * PI futexes happens in exit_pi_state():
1121 	 */
1122 	if (!pi && (uval & FUTEX_WAITERS)) {
1123 		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
1124 			   FUTEX_BITSET_MATCH_ANY);
1125 	}
1126 
1127 	return 0;
1128 }
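/*
 * For reference (include/uapi/linux/futex.h is authoritative): the futex
 * word holds the owner TID in FUTEX_TID_MASK (bits 0-29), FUTEX_OWNER_DIED
 * in bit 30 and FUTEX_WAITERS in bit 31. The cmpxchg above clears the TID,
 * preserves FUTEX_WAITERS and sets FUTEX_OWNER_DIED.
 */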
1129 
1130 /*
1131  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1132  */
1133 static inline int fetch_robust_entry(struct robust_list __user **entry,
1134 				     struct robust_list __user * __user *head,
1135 				     unsigned int *pi)
1136 {
1137 	unsigned long uentry;
1138 
1139 	if (get_user(uentry, (unsigned long __user *)head))
1140 		return -EFAULT;
1141 
1142 	*entry = (void __user *)(uentry & ~1UL);
1143 	*pi = uentry & 1;
1144 
1145 	return 0;
1146 }
1147 
1148 /*
1149  * Walk curr->robust_list (very carefully, it's a userspace list!)
1150  * and mark any locks found there dead, and notify any waiters.
1151  *
1152  * We silently return on any sign of list-walking problem.
1153  */
1154 static void exit_robust_list(struct task_struct *curr)
1155 {
1156 	struct robust_list_head __user *head = curr->robust_list;
1157 	struct robust_list __user *entry, *next_entry, *pending;
1158 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1159 	unsigned int next_pi;
1160 	unsigned long futex_offset;
1161 	int rc;
1162 
1163 	/*
1164 	 * Fetch the list head (which was registered earlier, via
1165 	 * sys_set_robust_list()):
1166 	 */
1167 	if (fetch_robust_entry(&entry, &head->list.next, &pi))
1168 		return;
1169 	/*
1170 	 * Fetch the relative futex offset:
1171 	 */
1172 	if (get_user(futex_offset, &head->futex_offset))
1173 		return;
1174 	/*
1175 	 * Fetch any possibly pending lock-add first, and handle it
1176 	 * if it exists:
1177 	 */
1178 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1179 		return;
1180 
1181 	next_entry = NULL;	/* avoid warning with gcc */
1182 	while (entry != &head->list) {
1183 		/*
1184 		 * Fetch the next entry in the list before calling
1185 		 * handle_futex_death:
1186 		 */
1187 		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1188 		/*
1189 		 * A pending lock might already be on the list, so
1190 		 * don't process it twice:
1191 		 */
1192 		if (entry != pending) {
1193 			if (handle_futex_death((void __user *)entry + futex_offset,
1194 						curr, pi, HANDLE_DEATH_LIST))
1195 				return;
1196 		}
1197 		if (rc)
1198 			return;
1199 		entry = next_entry;
1200 		pi = next_pi;
1201 		/*
1202 		 * Avoid excessively long or circular lists:
1203 		 */
1204 		if (!--limit)
1205 			break;
1206 
1207 		cond_resched();
1208 	}
1209 
1210 	if (pending) {
1211 		handle_futex_death((void __user *)pending + futex_offset,
1212 				   curr, pip, HANDLE_DEATH_PENDING);
1213 	}
1214 }
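/*
 * Userspace registration, as a rough editorial sketch (normally done once
 * per thread by the C library, not by applications):
 *
 *	struct robust_list_head head = {
 *		.list		= { .next = &head.list },	// empty list
 *		.futex_offset	= offsetof(struct my_lock, futex_word),
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * 'struct my_lock' and 'futex_word' are hypothetical names; only the list
 * head layout and the offset protocol are defined by the kernel ABI.
 */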
1215 
1216 #ifdef CONFIG_COMPAT
1217 static void __user *futex_uaddr(struct robust_list __user *entry,
1218 				compat_long_t futex_offset)
1219 {
1220 	compat_uptr_t base = ptr_to_compat(entry);
1221 	void __user *uaddr = compat_ptr(base + futex_offset);
1222 
1223 	return uaddr;
1224 }
1225 
1226 /*
1227  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1228  */
1229 static inline int
1230 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
1231 		   compat_uptr_t __user *head, unsigned int *pi)
1232 {
1233 	if (get_user(*uentry, head))
1234 		return -EFAULT;
1235 
1236 	*entry = compat_ptr((*uentry) & ~1);
1237 	*pi = (unsigned int)(*uentry) & 1;
1238 
1239 	return 0;
1240 }
1241 
1242 /*
1243  * Walk curr->robust_list (very carefully, it's a userspace list!)
1244  * and mark any locks found there dead, and notify any waiters.
1245  *
1246  * We silently return on any sign of list-walking problem.
1247  */
1248 static void compat_exit_robust_list(struct task_struct *curr)
1249 {
1250 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
1251 	struct robust_list __user *entry, *next_entry, *pending;
1252 	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1253 	unsigned int next_pi;
1254 	compat_uptr_t uentry, next_uentry, upending;
1255 	compat_long_t futex_offset;
1256 	int rc;
1257 
1258 	/*
1259 	 * Fetch the list head (which was registered earlier, via
1260 	 * sys_set_robust_list()):
1261 	 */
1262 	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
1263 		return;
1264 	/*
1265 	 * Fetch the relative futex offset:
1266 	 */
1267 	if (get_user(futex_offset, &head->futex_offset))
1268 		return;
1269 	/*
1270 	 * Fetch any possibly pending lock-add first, and handle it
1271 	 * if it exists:
1272 	 */
1273 	if (compat_fetch_robust_entry(&upending, &pending,
1274 			       &head->list_op_pending, &pip))
1275 		return;
1276 
1277 	next_entry = NULL;	/* avoid warning with gcc */
1278 	while (entry != (struct robust_list __user *) &head->list) {
1279 		/*
1280 		 * Fetch the next entry in the list before calling
1281 		 * handle_futex_death:
1282 		 */
1283 		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
1284 			(compat_uptr_t __user *)&entry->next, &next_pi);
1285 		/*
1286 		 * A pending lock might already be on the list, so
1287 		 * don't process it twice:
1288 		 */
1289 		if (entry != pending) {
1290 			void __user *uaddr = futex_uaddr(entry, futex_offset);
1291 
1292 			if (handle_futex_death(uaddr, curr, pi,
1293 					       HANDLE_DEATH_LIST))
1294 				return;
1295 		}
1296 		if (rc)
1297 			return;
1298 		uentry = next_uentry;
1299 		entry = next_entry;
1300 		pi = next_pi;
1301 		/*
1302 		 * Avoid excessively long or circular lists:
1303 		 */
1304 		if (!--limit)
1305 			break;
1306 
1307 		cond_resched();
1308 	}
1309 	if (pending) {
1310 		void __user *uaddr = futex_uaddr(pending, futex_offset);
1311 
1312 		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
1313 	}
1314 }
1315 #endif
1316 
1317 #ifdef CONFIG_FUTEX_PI
1318 
1319 /*
1320  * This task is holding PI mutexes at exit time => bad.
1321  * Kernel cleans up PI-state, but userspace is likely hosed.
1322  * (Robust-futex cleanup is separate and might save the day for userspace.)
1323  */
1324 static void exit_pi_state_list(struct task_struct *curr)
1325 {
1326 	struct list_head *next, *head = &curr->pi_state_list;
1327 	struct futex_pi_state *pi_state;
1328 	union futex_key key = FUTEX_KEY_INIT;
1329 
1330 	/*
1331 	 * The mutex mm_struct::futex_hash_lock might be acquired.
1332 	 */
1333 	might_sleep();
1334 	/*
1335 	 * Ensure the hash remains stable (no resize) during the while loop
1336 	 * below. The hb pointer is acquired under the pi_lock so we can't block
1337 	 * on the mutex.
1338 	 */
1339 	WARN_ON(curr != current);
1340 	guard(private_hash)();
1341 	/*
1342 	 * We are a ZOMBIE and nobody can enqueue itself on
1343 	 * pi_state_list anymore, but we have to be careful
1344 	 * versus waiters unqueueing themselves:
1345 	 */
1346 	raw_spin_lock_irq(&curr->pi_lock);
1347 	while (!list_empty(head)) {
1348 		next = head->next;
1349 		pi_state = list_entry(next, struct futex_pi_state, list);
1350 		key = pi_state->key;
1351 		if (1) {
1352 			CLASS(hb, hb)(&key);
1353 
1354 			/*
1355 			 * We can race against put_pi_state() removing itself from the
1356 			 * list (a waiter going away). put_pi_state() will first
1357 			 * decrement the reference count and then modify the list, so
1358 			 * it's possible to see the list entry but fail this reference
1359 			 * acquire.
1360 			 *
1361 			 * In that case; drop the locks to let put_pi_state() make
1362 			 * progress and retry the loop.
1363 			 */
1364 			if (!refcount_inc_not_zero(&pi_state->refcount)) {
1365 				raw_spin_unlock_irq(&curr->pi_lock);
1366 				cpu_relax();
1367 				raw_spin_lock_irq(&curr->pi_lock);
1368 				continue;
1369 			}
1370 			raw_spin_unlock_irq(&curr->pi_lock);
1371 
1372 			spin_lock(&hb->lock);
1373 			raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1374 			raw_spin_lock(&curr->pi_lock);
1375 			/*
1376 			 * We dropped the pi-lock, so re-check whether this
1377 			 * task still owns the PI-state:
1378 			 */
1379 			if (head->next != next) {
1380 				/* retain curr->pi_lock for the loop invariant */
1381 				raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1382 				spin_unlock(&hb->lock);
1383 				put_pi_state(pi_state);
1384 				continue;
1385 			}
1386 
1387 			WARN_ON(pi_state->owner != curr);
1388 			WARN_ON(list_empty(&pi_state->list));
1389 			list_del_init(&pi_state->list);
1390 			pi_state->owner = NULL;
1391 
1392 			raw_spin_unlock(&curr->pi_lock);
1393 			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1394 			spin_unlock(&hb->lock);
1395 		}
1396 
1397 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
1398 		put_pi_state(pi_state);
1399 
1400 		raw_spin_lock_irq(&curr->pi_lock);
1401 	}
1402 	raw_spin_unlock_irq(&curr->pi_lock);
1403 }
1404 #else
1405 static inline void exit_pi_state_list(struct task_struct *curr) { }
1406 #endif
1407 
1408 static void futex_cleanup(struct task_struct *tsk)
1409 {
1410 	if (unlikely(tsk->robust_list)) {
1411 		exit_robust_list(tsk);
1412 		tsk->robust_list = NULL;
1413 	}
1414 
1415 #ifdef CONFIG_COMPAT
1416 	if (unlikely(tsk->compat_robust_list)) {
1417 		compat_exit_robust_list(tsk);
1418 		tsk->compat_robust_list = NULL;
1419 	}
1420 #endif
1421 
1422 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1423 		exit_pi_state_list(tsk);
1424 }
1425 
1426 /**
1427  * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1428  * @tsk:	task to set the state on
1429  *
1430  * Set the futex exit state of the task lockless. The futex waiter code
1431  * observes that state when a task is exiting and loops until the task has
1432  * actually finished the futex cleanup. The worst case for this is that the
1433  * waiter runs through the wait loop until the state becomes visible.
1434  *
1435  * This is called from the recursive fault handling path in make_task_dead().
1436  *
1437  * This is best effort. Either the futex exit code has run already or
1438  * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1439  * take it over. If not, the problem is pushed back to user space. If the
1440  * futex exit code did not run yet, then an already queued waiter might
1441  * block forever, but there is nothing which can be done about that.
1442  */
1443 void futex_exit_recursive(struct task_struct *tsk)
1444 {
1445 	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1446 	if (tsk->futex_state == FUTEX_STATE_EXITING)
1447 		mutex_unlock(&tsk->futex_exit_mutex);
1448 	tsk->futex_state = FUTEX_STATE_DEAD;
1449 }
1450 
1451 static void futex_cleanup_begin(struct task_struct *tsk)
1452 {
1453 	/*
1454 	 * Prevent various race issues against a concurrent incoming waiter
1455 	 * including live locks by forcing the waiter to block on
1456 	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1457 	 * attach_to_pi_owner().
1458 	 */
1459 	mutex_lock(&tsk->futex_exit_mutex);
1460 
1461 	/*
1462 	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1463 	 *
1464 	 * This ensures that all subsequent checks of tsk->futex_state in
1465 	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1466 	 * tsk->pi_lock held.
1467 	 *
1468 	 * It guarantees also that a pi_state which was queued right before
1469 	 * the state change under tsk->pi_lock by a concurrent waiter must
1470 	 * be observed in exit_pi_state_list().
1471 	 */
1472 	raw_spin_lock_irq(&tsk->pi_lock);
1473 	tsk->futex_state = FUTEX_STATE_EXITING;
1474 	raw_spin_unlock_irq(&tsk->pi_lock);
1475 }
1476 
1477 static void futex_cleanup_end(struct task_struct *tsk, int state)
1478 {
1479 	/*
1480 	 * Lockless store. The only side effect is that an observer might
1481 	 * take another loop until it becomes visible.
1482 	 */
1483 	tsk->futex_state = state;
1484 	/*
1485 	 * Drop the exit protection. This unblocks waiters which observed
1486 	 * FUTEX_STATE_EXITING to reevaluate the state.
1487 	 */
1488 	mutex_unlock(&tsk->futex_exit_mutex);
1489 }
1490 
1491 void futex_exec_release(struct task_struct *tsk)
1492 {
1493 	/*
1494 	 * The state handling is done for consistency, but in the case of
1495 	 * exec() there is no way to prevent further damage as the PID stays
1496 	 * the same. But for the unlikely and arguably buggy case that a
1497 	 * futex is held on exec(), this provides at least as much state
1498 	 * consistency protection as is possible.
1499 	 */
1500 	futex_cleanup_begin(tsk);
1501 	futex_cleanup(tsk);
1502 	/*
1503 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
1504 	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
1505 	 */
1506 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
1507 }
1508 
1509 void futex_exit_release(struct task_struct *tsk)
1510 {
1511 	futex_cleanup_begin(tsk);
1512 	futex_cleanup(tsk);
1513 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1514 }
1515 
1516 static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
1517 				   struct futex_private_hash *fph)
1518 {
1519 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1520 	fhb->priv = fph;
1521 #endif
1522 	atomic_set(&fhb->waiters, 0);
1523 	plist_head_init(&fhb->chain);
1524 	spin_lock_init(&fhb->lock);
1525 }
1526 
1527 #define FH_CUSTOM	0x01
1528 
1529 #ifdef CONFIG_FUTEX_PRIVATE_HASH
1530 
1531 /*
1532  * futex-ref
1533  *
1534  * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
1535  * code because it just doesn't fit right.
1536  *
1537  * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
1538  * re-initializes the state automatically, such that the fph swizzle is also a
1539  * transition back to per-cpu.
1540  */
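/*
 * Lifecycle as implemented below: a fph starts out in FR_PERCPU, where
 * futex_ref_{get,put}() only touch the per-cpu counter. futex_ref_drop()
 * switches it to FR_ATOMIC (after at least one RCU grace period since the
 * fph was published), a second grace period later futex_ref_rcu() folds
 * the per-cpu counts into mm->futex_atomic, and once that atomic reaches
 * zero futex_ref_is_dead() lets __futex_pivot_hash() install the
 * replacement hash, which again starts in FR_PERCPU.
 */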
1541 
1542 static void futex_ref_rcu(struct rcu_head *head);
1543 
1544 static void __futex_ref_atomic_begin(struct futex_private_hash *fph)
1545 {
1546 	struct mm_struct *mm = fph->mm;
1547 
1548 	/*
1549 	 * The counter we're about to switch to must have fully switched;
1550 	 * otherwise it would be impossible for it to have reported success
1551 	 * from futex_ref_is_dead().
1552 	 */
1553 	WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
1554 
1555 	/*
1556 	 * Set the atomic to the bias value such that futex_ref_{get,put}()
1557 	 * will never observe 0. Will be fixed up in __futex_ref_atomic_end()
1558 	 * when folding in the percpu count.
1559 	 */
1560 	atomic_long_set(&mm->futex_atomic, LONG_MAX);
1561 	smp_store_release(&fph->state, FR_ATOMIC);
1562 
1563 	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
1564 }
1565 
1566 static void __futex_ref_atomic_end(struct futex_private_hash *fph)
1567 {
1568 	struct mm_struct *mm = fph->mm;
1569 	unsigned int count = 0;
1570 	long ret;
1571 	int cpu;
1572 
1573 	/*
1574 	 * Per __futex_ref_atomic_begin() the state of the fph must be ATOMIC
1575 	 * and per this RCU callback, everybody must now observe this state and
1576 	 * use the atomic variable.
1577 	 */
1578 	WARN_ON_ONCE(fph->state != FR_ATOMIC);
1579 
1580 	/*
1581 	 * Therefore the per-cpu counter is now stable, sum and reset.
1582 	 */
1583 	for_each_possible_cpu(cpu) {
1584 		unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
1585 		count += *ptr;
1586 		*ptr = 0;
1587 	}
1588 
1589 	/*
1590 	 * Re-init for the next cycle.
1591 	 */
1592 	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
1593 
1594 	/*
1595 	 * Add actual count, subtract bias and initial refcount.
1596 	 *
1597 	 * The moment this atomic operation happens, futex_ref_is_dead() can
1598 	 * become true.
1599 	 */
1600 	ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
1601 	if (!ret)
1602 		wake_up_var(mm);
1603 
1604 	WARN_ON_ONCE(ret < 0);
1605 	mmput_async(mm);
1606 }
1607 
1608 static void futex_ref_rcu(struct rcu_head *head)
1609 {
1610 	struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
1611 	struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
1612 
1613 	if (fph->state == FR_PERCPU) {
1614 		/*
1615 		 * Per this extra grace-period, everybody must now observe
1616 		 * fph as the current fph and no previously observed fph's
1617 		 * are in-flight.
1618 		 *
1619 		 * Notably, nobody will now rely on the atomic
1620 		 * futex_ref_is_dead() state anymore so we can begin the
1621 		 * migration of the per-cpu counter into the atomic.
1622 		 */
1623 		__futex_ref_atomic_begin(fph);
1624 		return;
1625 	}
1626 
1627 	__futex_ref_atomic_end(fph);
1628 }
1629 
1630 /*
1631  * Drop the initial refcount and transition to atomics.
1632  */
1633 static void futex_ref_drop(struct futex_private_hash *fph)
1634 {
1635 	struct mm_struct *mm = fph->mm;
1636 
1637 	/*
1638 	 * Can only transition the current fph;
1639 	 */
1640 	WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
1641 	/*
1642 	 * We enqueue at least one RCU callback. Ensure mm stays if the task
1643 	 * exits before the transition is completed.
1644 	 */
1645 	mmget(mm);
1646 
1647 	/*
1648 	 * In order to avoid the following scenario:
1649 	 *
1650 	 * futex_hash()			__futex_pivot_hash()
1651 	 *   guard(rcu);		  guard(mm->futex_hash_lock);
1652 	 *   fph = mm->futex_phash;
1653 	 *				  rcu_assign_pointer(&mm->futex_phash, new);
1654 	 *				futex_hash_allocate()
1655 	 *				  futex_ref_drop()
1656 	 *				    fph->state = FR_ATOMIC;
1657 	 *				    atomic_set(, BIAS);
1658 	 *
1659 	 *   futex_private_hash_get(fph); // OOPS
1660 	 *
1661 	 * Where an old fph (which is FR_ATOMIC and should fail on
1662 	 * inc_not_zero) will succeed because a new transition is started and
1663 	 * the atomic is biased away from 0.
1664 	 *
1665 	 * There must be at least one full grace-period between publishing a
1666 	 * new fph and trying to replace it.
1667 	 */
1668 	if (poll_state_synchronize_rcu(mm->futex_batches)) {
1669 		/*
1670 		 * There was a grace-period, we can begin now.
1671 		 */
1672 		__futex_ref_atomic_begin(fph);
1673 		return;
1674 	}
1675 
1676 	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
1677 }
1678 
1679 static bool futex_ref_get(struct futex_private_hash *fph)
1680 {
1681 	struct mm_struct *mm = fph->mm;
1682 
1683 	guard(rcu)();
1684 
1685 	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
1686 		this_cpu_inc(*mm->futex_ref);
1687 		return true;
1688 	}
1689 
1690 	return atomic_long_inc_not_zero(&mm->futex_atomic);
1691 }
1692 
1693 static bool futex_ref_put(struct futex_private_hash *fph)
1694 {
1695 	struct mm_struct *mm = fph->mm;
1696 
1697 	guard(rcu)();
1698 
1699 	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
1700 		this_cpu_dec(*mm->futex_ref);
1701 		return false;
1702 	}
1703 
1704 	return atomic_long_dec_and_test(&mm->futex_atomic);
1705 }
1706 
1707 static bool futex_ref_is_dead(struct futex_private_hash *fph)
1708 {
1709 	struct mm_struct *mm = fph->mm;
1710 
1711 	guard(rcu)();
1712 
1713 	if (smp_load_acquire(&fph->state) == FR_PERCPU)
1714 		return false;
1715 
1716 	return atomic_long_read(&mm->futex_atomic) == 0;
1717 }
1718 
1719 int futex_mm_init(struct mm_struct *mm)
1720 {
1721 	mutex_init(&mm->futex_hash_lock);
1722 	RCU_INIT_POINTER(mm->futex_phash, NULL);
1723 	mm->futex_phash_new = NULL;
1724 	/* futex-ref */
1725 	atomic_long_set(&mm->futex_atomic, 0);
1726 	mm->futex_batches = get_state_synchronize_rcu();
1727 	mm->futex_ref = alloc_percpu(unsigned int);
1728 	if (!mm->futex_ref)
1729 		return -ENOMEM;
1730 	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
1731 	return 0;
1732 }
1733 
1734 void futex_hash_free(struct mm_struct *mm)
1735 {
1736 	struct futex_private_hash *fph;
1737 
1738 	free_percpu(mm->futex_ref);
1739 	kvfree(mm->futex_phash_new);
1740 	fph = rcu_dereference_raw(mm->futex_phash);
1741 	if (fph)
1742 		kvfree(fph);
1743 }
1744 
1745 static bool futex_pivot_pending(struct mm_struct *mm)
1746 {
1747 	struct futex_private_hash *fph;
1748 
1749 	guard(rcu)();
1750 
1751 	if (!mm->futex_phash_new)
1752 		return true;
1753 
1754 	fph = rcu_dereference(mm->futex_phash);
1755 	return futex_ref_is_dead(fph);
1756 }
1757 
1758 static bool futex_hash_less(struct futex_private_hash *a,
1759 			    struct futex_private_hash *b)
1760 {
1761 	/* user provided always wins */
1762 	if (!a->custom && b->custom)
1763 		return true;
1764 	if (a->custom && !b->custom)
1765 		return false;
1766 
1767 	/* zero-sized hash wins */
1768 	if (!b->hash_mask)
1769 		return true;
1770 	if (!a->hash_mask)
1771 		return false;
1772 
1773 	/* keep the biggest */
1774 	if (a->hash_mask < b->hash_mask)
1775 		return true;
1776 	if (a->hash_mask > b->hash_mask)
1777 		return false;
1778 
1779 	return false; /* equal */
1780 }
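
/*
 * For example (illustrative): between two auto-sized replacements the
 * larger one is kept; a zero-sized request (hash_mask == 0) beats any
 * sized request of the same kind; and a user provided (custom) hash wins
 * over an auto-sized one regardless of size.
 */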
1781 
1782 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1783 {
1784 	struct mm_struct *mm = current->mm;
1785 	struct futex_private_hash *fph;
1786 	bool custom = flags & FH_CUSTOM;
1787 	int i;
1788 
1789 	if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots)))
1790 		return -EINVAL;
1791 
1792 	/*
1793 	 * Once we've disabled the global hash there is no way back.
1794 	 */
1795 	scoped_guard(rcu) {
1796 		fph = rcu_dereference(mm->futex_phash);
1797 		if (fph && !fph->hash_mask) {
1798 			if (custom)
1799 				return -EBUSY;
1800 			return 0;
1801 		}
1802 	}
1803 
1804 	fph = kvzalloc(struct_size(fph, queues, hash_slots),
1805 		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1806 	if (!fph)
1807 		return -ENOMEM;
1808 
1809 	fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
1810 	fph->custom = custom;
1811 	fph->mm = mm;
1812 
1813 	for (i = 0; i < hash_slots; i++)
1814 		futex_hash_bucket_init(&fph->queues[i], fph);
1815 
1816 	if (custom) {
1817 		/*
1818 		 * Only let prctl() wait / retry; don't unduly delay clone().
1819 		 */
1820 again:
1821 		wait_var_event(mm, futex_pivot_pending(mm));
1822 	}
1823 
1824 	scoped_guard(mutex, &mm->futex_hash_lock) {
1825 		struct futex_private_hash *free __free(kvfree) = NULL;
1826 		struct futex_private_hash *cur, *new;
1827 
1828 		cur = rcu_dereference_protected(mm->futex_phash,
1829 						lockdep_is_held(&mm->futex_hash_lock));
1830 		new = mm->futex_phash_new;
1831 		mm->futex_phash_new = NULL;
1832 
1833 		if (fph) {
1834 			if (cur && !cur->hash_mask) {
1835 				/*
1836 				 * If two threads simultaneously request the global
1837 				 * hash then the first one performs the switch,
1838 				 * the second one returns here.
1839 				 */
1840 				free = fph;
1841 				mm->futex_phash_new = new;
1842 				return -EBUSY;
1843 			}
1844 			if (cur && !new) {
1845 				/*
1846 				 * If we have an existing hash, but do not yet have
1847 				 * allocated a replacement hash, drop the initial
1848 				 * reference on the existing hash.
1849 				 */
1850 				futex_ref_drop(cur);
1851 			}
1852 
1853 			if (new) {
1854 				/*
1855 				 * Two updates raced; throw out the lesser one.
1856 				 */
1857 				if (futex_hash_less(new, fph)) {
1858 					free = new;
1859 					new = fph;
1860 				} else {
1861 					free = fph;
1862 				}
1863 			} else {
1864 				new = fph;
1865 			}
1866 			fph = NULL;
1867 		}
1868 
1869 		if (new) {
1870 			/*
1871 			 * Will set mm->futex_phash_new on failure;
1872 			 * futex_private_hash_get() will try again.
1873 			 */
1874 			if (!__futex_pivot_hash(mm, new) && custom)
1875 				goto again;
1876 		}
1877 	}
1878 	return 0;
1879 }
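
/*
 * Illustrative scenario for the locked section above: if an auto-sized
 * replacement (say 64 slots, queued by futex_hash_allocate_default()) is
 * still parked in mm->futex_phash_new when a prctl() request for 32 slots
 * arrives, both meet under futex_hash_lock and futex_hash_less() keeps the
 * user provided 32 slot hash while the auto-sized one is freed.
 */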
1880 
1881 int futex_hash_allocate_default(void)
1882 {
1883 	unsigned int threads, buckets, current_buckets = 0;
1884 	struct futex_private_hash *fph;
1885 
1886 	if (!current->mm)
1887 		return 0;
1888 
1889 	scoped_guard(rcu) {
1890 		threads = min_t(unsigned int,
1891 				get_nr_threads(current),
1892 				num_online_cpus());
1893 
1894 		fph = rcu_dereference(current->mm->futex_phash);
1895 		if (fph) {
1896 			if (fph->custom)
1897 				return 0;
1898 
1899 			current_buckets = fph->hash_mask + 1;
1900 		}
1901 	}
1902 
1903 	/*
1904 	 * The default allocation will remain within
1905 	 *   16 <= threads * 4 <= global hash size
1906 	 */
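	/*
	 * For example (illustrative): with 6 threads (and at least as many
	 * CPUs online) this yields roundup_pow_of_two(4 * 6) = 32 buckets;
	 * a single threaded process is clamped up to 16, and very large
	 * thread counts are clamped to the global hash size
	 * (futex_hashmask + 1).
	 */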
1907 	buckets = roundup_pow_of_two(4 * threads);
1908 	buckets = clamp(buckets, 16, futex_hashmask + 1);
1909 
1910 	if (current_buckets >= buckets)
1911 		return 0;
1912 
1913 	return futex_hash_allocate(buckets, 0);
1914 }
1915 
1916 static int futex_hash_get_slots(void)
1917 {
1918 	struct futex_private_hash *fph;
1919 
1920 	guard(rcu)();
1921 	fph = rcu_dereference(current->mm->futex_phash);
1922 	if (fph && fph->hash_mask)
1923 		return fph->hash_mask + 1;
1924 	return 0;
1925 }
1926 
1927 #else
1928 
1929 static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
1930 {
1931 	return -EINVAL;
1932 }
1933 
1934 static int futex_hash_get_slots(void)
1935 {
1936 	return 0;
1937 }
1938 
1939 #endif
1940 
1941 int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
1942 {
1943 	unsigned int flags = FH_CUSTOM;
1944 	int ret;
1945 
1946 	switch (arg2) {
1947 	case PR_FUTEX_HASH_SET_SLOTS:
1948 		if (arg4)
1949 			return -EINVAL;
1950 		ret = futex_hash_allocate(arg3, flags);
1951 		break;
1952 
1953 	case PR_FUTEX_HASH_GET_SLOTS:
1954 		ret = futex_hash_get_slots();
1955 		break;
1956 
1957 	default:
1958 		ret = -EINVAL;
1959 		break;
1960 	}
1961 	return ret;
1962 }
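
/*
 * Illustrative userspace usage of the interface above (a sketch, assuming
 * the PR_FUTEX_HASH prctl() option and the PR_FUTEX_HASH_* constants from
 * <linux/prctl.h>; not part of this file):
 *
 *	// Request a private hash with 16 slots; the slot count must be 0
 *	// or a power of two greater than 1, and arg4 must be 0.
 *	if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0) != 0)
 *		perror("PR_FUTEX_HASH_SET_SLOTS");
 *
 *	// Query the slot count; 0 means no sized private hash is installed.
 *	int slots = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0);
 *
 * A SET_SLOTS request can also fail with -EBUSY, e.g. once a zero-sized
 * hash has already been installed (see futex_hash_allocate()).
 */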
1963 
1964 static int __init futex_init(void)
1965 {
1966 	unsigned long hashsize, i;
1967 	unsigned int order, n;
1968 	unsigned long size;
1969 
1970 #ifdef CONFIG_BASE_SMALL
1971 	hashsize = 16;
1972 #else
1973 	hashsize = 256 * num_possible_cpus();
1974 	hashsize /= num_possible_nodes();
1975 	hashsize = max(4, hashsize);
1976 	hashsize = roundup_pow_of_two(hashsize);
1977 #endif
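	/*
	 * Worked example (illustrative): 16 possible CPUs spread over two
	 * NUMA nodes give 256 * 16 / 2 = 2048 buckets per node; the
	 * CONFIG_BASE_SMALL case always uses 16.
	 */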
1978 	futex_hashshift = ilog2(hashsize);
1979 	size = sizeof(struct futex_hash_bucket) * hashsize;
1980 	order = get_order(size);
1981 
1982 	for_each_node(n) {
1983 		struct futex_hash_bucket *table;
1984 
1985 		if (order > MAX_PAGE_ORDER)
1986 			table = vmalloc_huge_node(size, GFP_KERNEL, n);
1987 		else
1988 			table = alloc_pages_exact_nid(n, size, GFP_KERNEL);
1989 
1990 		BUG_ON(!table);
1991 
1992 		for (i = 0; i < hashsize; i++)
1993 			futex_hash_bucket_init(&table[i], NULL);
1994 
1995 		futex_queues[n] = table;
1996 	}
1997 
1998 	futex_hashmask = hashsize - 1;
1999 	pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
2000 		hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
2001 		order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
2002 	return 0;
2003 }
2004 core_initcall(futex_init);
2005