1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/mmu_notifier.c
4  *
5  *  Copyright (C) 2008  Qumranet, Inc.
6  *  Copyright (C) 2008  SGI
7  *             Christoph Lameter <cl@gentwo.org>
8  */
9 
10 #include <linux/rculist.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/export.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/interval_tree.h>
16 #include <linux/srcu.h>
17 #include <linux/rcupdate.h>
18 #include <linux/sched.h>
19 #include <linux/sched/mm.h>
20 #include <linux/slab.h>
21 
22 #include "vma.h"
23 
24 /* global SRCU for all MMs */
25 DEFINE_STATIC_SRCU(srcu);
26 
27 #ifdef CONFIG_LOCKDEP
28 struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
29 	.name = "mmu_notifier_invalidate_range_start"
30 };
31 #endif
32 
33 /*
34  * The mmu_notifier_subscriptions structure is allocated and installed in
35  * mm->notifier_subscriptions inside the mm_take_all_locks() protected
36  * critical section and it's released only when mm_count reaches zero
37  * in mmdrop().
38  */
39 struct mmu_notifier_subscriptions {
40 	/* all mmu notifiers registered in this mm are queued in this list */
41 	struct hlist_head list;
42 	bool has_itree;
43 	/* to serialize the list modifications and hlist_unhashed */
44 	spinlock_t lock;
45 	unsigned long invalidate_seq;
46 	unsigned long active_invalidate_ranges;
47 	struct rb_root_cached itree;
48 	wait_queue_head_t wq;
49 	struct hlist_head deferred_list;
50 };
51 
52 /*
53  * This is a collision-retry read-side/write-side 'lock', a lot like a
54  * seqcount, but it allows multiple write-sides to hold it at
55  * once. Conceptually the write side is protecting the values of the PTEs in
56  * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
57  * writer exists.
58  *
59  * Note that the core mm creates nested invalidate_range_start()/end() regions
60  * within the same thread, and runs invalidate_range_start()/end() in parallel
61  * on multiple CPUs. This is designed to not reduce concurrency or block
62  * progress on the mm side.
63  *
64  * As a secondary function, holding the full write side also serves to prevent
65  * writers for the itree; this is an optimization to avoid extra locking
66  * during the invalidate_range_start/end notifiers.
67  *
68  * The write side has two states, fully excluded:
69  *  - mm->active_invalidate_ranges != 0
70  *  - subscriptions->invalidate_seq & 1 == True (odd)
71  *  - some range on the mm_struct is being invalidated
72  *  - the itree is not allowed to change
73  *
74  * And partially excluded:
75  *  - mm->active_invalidate_ranges != 0
76  *  - subscriptions->invalidate_seq & 1 == False (even)
77  *  - some range on the mm_struct is being invalidated
78  *  - the itree is allowed to change
79  *
80  * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
81  *    seq |= 1  # Begin writing
82  *    seq++     # Release the writing state
83  *    seq & 1   # True if a writer exists
84  *
85  * The latter state avoids some expensive work on inv_end in the common case of
86  * no mmu_interval_notifier monitoring the VA.
87  */
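
/*
 * A hypothetical walk-through of the invalidate_seq transitions described
 * above, assuming one itree subscription overlaps the invalidated ranges and
 * two invalidations run in parallel. The numeric values are only an
 * illustration; invalidate_seq starts at 2 in __mmu_notifier_register():
 *
 *	idle:                      seq == 2 (even), active_invalidate_ranges == 0
 *	mn_itree_inv_start_range:  seq |= 1 -> 3,   active_invalidate_ranges == 1
 *	parallel inv_start:                    3,   active_invalidate_ranges == 2
 *	first mn_itree_inv_end:                3,   active_invalidate_ranges == 1
 *	last mn_itree_inv_end:     seq++ -> 4,      active_invalidate_ranges == 0
 *	                           (deferred list applied, waiters woken)
 */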
88 static bool
89 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
90 {
91 	lockdep_assert_held(&subscriptions->lock);
92 	return subscriptions->invalidate_seq & 1;
93 }
94 
95 static struct mmu_interval_notifier *
96 mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
97 			 const struct mmu_notifier_range *range,
98 			 unsigned long *seq)
99 {
100 	struct interval_tree_node *node;
101 	struct mmu_interval_notifier *res = NULL;
102 
103 	spin_lock(&subscriptions->lock);
104 	subscriptions->active_invalidate_ranges++;
105 	node = interval_tree_iter_first(&subscriptions->itree, range->start,
106 					range->end - 1);
107 	if (node) {
108 		subscriptions->invalidate_seq |= 1;
109 		res = container_of(node, struct mmu_interval_notifier,
110 				   interval_tree);
111 	}
112 
113 	*seq = subscriptions->invalidate_seq;
114 	spin_unlock(&subscriptions->lock);
115 	return res;
116 }
117 
118 static struct mmu_interval_notifier *
119 mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
120 		  const struct mmu_notifier_range *range)
121 {
122 	struct interval_tree_node *node;
123 
124 	node = interval_tree_iter_next(&interval_sub->interval_tree,
125 				       range->start, range->end - 1);
126 	if (!node)
127 		return NULL;
128 	return container_of(node, struct mmu_interval_notifier, interval_tree);
129 }
130 
131 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
132 {
133 	struct mmu_interval_notifier *interval_sub;
134 	struct hlist_node *next;
135 
136 	spin_lock(&subscriptions->lock);
137 	if (--subscriptions->active_invalidate_ranges ||
138 	    !mn_itree_is_invalidating(subscriptions)) {
139 		spin_unlock(&subscriptions->lock);
140 		return;
141 	}
142 
143 	/* Make invalidate_seq even */
144 	subscriptions->invalidate_seq++;
145 
146 	/*
147 	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
148 	 * Adds and removes are queued until the final inv_end happens and
149 	 * are then applied. This arrangement for tree updates avoids taking
150 	 * a blocking lock during invalidate_range_start.
151 	 */
152 	hlist_for_each_entry_safe(interval_sub, next,
153 				  &subscriptions->deferred_list,
154 				  deferred_item) {
155 		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
156 			interval_tree_insert(&interval_sub->interval_tree,
157 					     &subscriptions->itree);
158 		else
159 			interval_tree_remove(&interval_sub->interval_tree,
160 					     &subscriptions->itree);
161 		hlist_del(&interval_sub->deferred_item);
162 	}
163 	spin_unlock(&subscriptions->lock);
164 
165 	wake_up_all(&subscriptions->wq);
166 }
167 
168 /**
169  * mmu_interval_read_begin - Begin a read side critical section against a VA
170  *                           range
171  * @interval_sub: The interval subscription
172  *
173  * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
174  * collision-retry scheme similar to seqcount for the VA range under
175  * subscription. If the mm invokes invalidation during the critical section
176  * then mmu_interval_read_retry() will return true.
177  *
178  * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
179  * requires a blocking context.  The critical region formed by this can sleep,
180  * and the required 'user_lock' can also be a sleeping lock.
181  *
182  * The caller is required to provide a 'user_lock' to serialize both teardown
183  * and setup.
184  *
185  * The return value should be passed to mmu_interval_read_retry().
186  */
187 unsigned long
188 mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
189 {
190 	struct mmu_notifier_subscriptions *subscriptions =
191 		interval_sub->mm->notifier_subscriptions;
192 	unsigned long seq;
193 	bool is_invalidating;
194 
195 	/*
196 	 * If the subscription has a different seq value under the user_lock
197 	 * than we started with then it has collided.
198 	 *
199 	 * If the subscription currently has the same seq value as the
200 	 * subscriptions' invalidate_seq, then it is currently between
201 	 * invalidate_start/end and is colliding.
202 	 *
203 	 * The locking looks broadly like this:
204 	 *   mn_itree_inv_start():                 mmu_interval_read_begin():
205 	 *                                         spin_lock
206 	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
207 	 *                                          seq == subs->invalidate_seq
208 	 *                                         spin_unlock
209 	 *    spin_lock
210 	 *     seq = ++subscriptions->invalidate_seq
211 	 *    spin_unlock
212 	 *     op->invalidate():
213 	 *       user_lock
214 	 *        mmu_interval_set_seq()
215 	 *         interval_sub->invalidate_seq = seq
216 	 *       user_unlock
217 	 *
218 	 *                          [Required: mmu_interval_read_retry() == true]
219 	 *
220 	 *   mn_itree_inv_end():
221 	 *    spin_lock
222 	 *     seq = ++subscriptions->invalidate_seq
223 	 *    spin_unlock
224 	 *
225 	 *                                        user_lock
226 	 *                                         mmu_interval_read_retry():
227 	 *                                          interval_sub->invalidate_seq != seq
228 	 *                                        user_unlock
229 	 *
230 	 * Barriers are not needed here as any races here are closed by an
231 	 * eventual mmu_interval_read_retry(), which provides a barrier via the
232 	 * user_lock.
233 	 */
234 	spin_lock(&subscriptions->lock);
235 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
236 	seq = READ_ONCE(interval_sub->invalidate_seq);
237 	is_invalidating = seq == subscriptions->invalidate_seq;
238 	spin_unlock(&subscriptions->lock);
239 
240 	/*
241 	 * interval_sub->invalidate_seq must always be set to an odd value via
242 	 * mmu_interval_set_seq() using the provided cur_seq from
243 	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
244 	 * will always clear the below sleep in some reasonable time as
245 	 * subscriptions->invalidate_seq is even in the idle state.
246 	 */
247 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
248 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
249 	if (is_invalidating)
250 		wait_event(subscriptions->wq,
251 			   READ_ONCE(subscriptions->invalidate_seq) != seq);
252 
253 	/*
254 	 * Notice that mmu_interval_read_retry() can already be true at this
255 	 * point; avoiding loops here allows the caller to provide a global
256 	 * time bound.
257 	 */
258 
259 	return seq;
260 }
261 EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
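
/*
 * Illustrative sketch only (not part of the kernel build): the intended
 * collision-retry pattern around mmu_interval_read_begin() and
 * mmu_interval_read_retry(), as described in Documentation/mm/hmm.rst.
 * driver_lock, hw_build_sptes() and hw_program_sptes() are hypothetical
 * driver-side names.
 *
 *	again:
 *		seq = mmu_interval_read_begin(&interval_sub);
 *
 *		hw_build_sptes();		(may sleep, fault pages, etc.)
 *
 *		spin_lock(&driver_lock);
 *		if (mmu_interval_read_retry(&interval_sub, seq)) {
 *			spin_unlock(&driver_lock);
 *			goto again;
 *		}
 *		hw_program_sptes();		(publish under driver_lock)
 *		spin_unlock(&driver_lock);
 */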
262 
263 static void mn_itree_finish_pass(struct llist_head *finish_passes)
264 {
265 	struct llist_node *first = llist_reverse_order(__llist_del_all(finish_passes));
266 	struct mmu_interval_notifier_finish *f, *next;
267 
268 	llist_for_each_entry_safe(f, next, first, link)
269 		f->notifier->ops->invalidate_finish(f);
270 }
271 
272 static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
273 			     struct mm_struct *mm)
274 {
275 	struct mmu_notifier_range range = {
276 		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
277 		.event = MMU_NOTIFY_RELEASE,
278 		.mm = mm,
279 		.start = 0,
280 		.end = ULONG_MAX,
281 	};
282 	struct mmu_interval_notifier *interval_sub;
283 	LLIST_HEAD(finish_passes);
284 	unsigned long cur_seq;
285 	bool ret;
286 
287 	for (interval_sub =
288 		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
289 	     interval_sub;
290 	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
291 		if (interval_sub->ops->invalidate_start) {
292 			struct mmu_interval_notifier_finish *finish = NULL;
293 
294 			ret = interval_sub->ops->invalidate_start(interval_sub,
295 								  &range,
296 								  cur_seq,
297 								  &finish);
298 			if (ret && finish) {
299 				finish->notifier = interval_sub;
300 				__llist_add(&finish->link, &finish_passes);
301 			}
302 
303 		} else {
304 			ret = interval_sub->ops->invalidate(interval_sub,
305 							    &range,
306 							    cur_seq);
307 		}
308 		WARN_ON(!ret);
309 	}
310 
311 	mn_itree_finish_pass(&finish_passes);
312 	mn_itree_inv_end(subscriptions);
313 }
314 
315 /*
316  * This function can't run concurrently against mmu_notifier_register
317  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
318  * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
319  * in parallel despite there being no task using this mm any more,
320  * through the vmas outside of the exit_mmap context, such as with
321  * vmtruncate. This serializes against mmu_notifier_unregister with
322  * the notifier_subscriptions->lock in addition to SRCU and it serializes
323  * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
324  * can't go away from under us as exit_mmap holds an mm_count pin
325  * itself.
326  */
327 static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
328 			     struct mm_struct *mm)
329 {
330 	struct mmu_notifier *subscription;
331 	int id;
332 
333 	/*
334 	 * SRCU here will block mmu_notifier_unregister until
335 	 * ->release returns.
336 	 */
337 	id = srcu_read_lock(&srcu);
338 	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
339 				 srcu_read_lock_held(&srcu))
340 		/*
341 		 * If ->release runs before mmu_notifier_unregister it must be
342 		 * handled, as it's the only way for the driver to flush all
343 		 * existing sptes and stop the driver from establishing any more
344 		 * sptes before all the pages in the mm are freed.
345 		 */
346 		if (subscription->ops->release)
347 			subscription->ops->release(subscription, mm);
348 
349 	spin_lock(&subscriptions->lock);
350 	while (unlikely(!hlist_empty(&subscriptions->list))) {
351 		subscription = hlist_entry(subscriptions->list.first,
352 					   struct mmu_notifier, hlist);
353 		/*
354 		 * We arrived before mmu_notifier_unregister so
355 		 * mmu_notifier_unregister will do nothing other than to wait
356 		 * for ->release to finish and for mmu_notifier_unregister to
357 		 * return.
358 		 */
359 		hlist_del_init_rcu(&subscription->hlist);
360 	}
361 	spin_unlock(&subscriptions->lock);
362 	srcu_read_unlock(&srcu, id);
363 
364 	/*
365 	 * synchronize_srcu here prevents mmu_notifier_release from returning to
366 	 * exit_mmap (which would proceed with freeing all pages in the mm)
367 	 * until the ->release method returns, if it was invoked by
368 	 * mmu_notifier_unregister.
369 	 *
370 	 * The notifier_subscriptions can't go away from under us because
371 	 * one mm_count is held by exit_mmap.
372 	 */
373 	synchronize_srcu(&srcu);
374 }
375 
376 void __mmu_notifier_release(struct mm_struct *mm)
377 {
378 	struct mmu_notifier_subscriptions *subscriptions =
379 		mm->notifier_subscriptions;
380 
381 	if (subscriptions->has_itree)
382 		mn_itree_release(subscriptions, mm);
383 
384 	if (!hlist_empty(&subscriptions->list))
385 		mn_hlist_release(subscriptions, mm);
386 }
387 
388 /*
389  * If no young bitflag is supported by the hardware, ->clear_flush_young can
390  * unmap the address and return 1 or 0 depending on whether the mapping
391  * previously existed.
392  */
393 int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
394 					unsigned long start,
395 					unsigned long end)
396 {
397 	struct mmu_notifier *subscription;
398 	int young = 0, id;
399 
400 	id = srcu_read_lock(&srcu);
401 	hlist_for_each_entry_rcu(subscription,
402 				 &mm->notifier_subscriptions->list, hlist,
403 				 srcu_read_lock_held(&srcu)) {
404 		if (subscription->ops->clear_flush_young)
405 			young |= subscription->ops->clear_flush_young(
406 				subscription, mm, start, end);
407 	}
408 	srcu_read_unlock(&srcu, id);
409 
410 	return young;
411 }
412 
413 int __mmu_notifier_clear_young(struct mm_struct *mm,
414 			       unsigned long start,
415 			       unsigned long end)
416 {
417 	struct mmu_notifier *subscription;
418 	int young = 0, id;
419 
420 	id = srcu_read_lock(&srcu);
421 	hlist_for_each_entry_rcu(subscription,
422 				 &mm->notifier_subscriptions->list, hlist,
423 				 srcu_read_lock_held(&srcu)) {
424 		if (subscription->ops->clear_young)
425 			young |= subscription->ops->clear_young(subscription,
426 								mm, start, end);
427 	}
428 	srcu_read_unlock(&srcu, id);
429 
430 	return young;
431 }
432 
433 int __mmu_notifier_test_young(struct mm_struct *mm,
434 			      unsigned long address)
435 {
436 	struct mmu_notifier *subscription;
437 	int young = 0, id;
438 
439 	id = srcu_read_lock(&srcu);
440 	hlist_for_each_entry_rcu(subscription,
441 				 &mm->notifier_subscriptions->list, hlist,
442 				 srcu_read_lock_held(&srcu)) {
443 		if (subscription->ops->test_young) {
444 			young = subscription->ops->test_young(subscription, mm,
445 							      address);
446 			if (young)
447 				break;
448 		}
449 	}
450 	srcu_read_unlock(&srcu, id);
451 
452 	return young;
453 }
454 
455 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
456 			       const struct mmu_notifier_range *range)
457 {
458 	struct mmu_interval_notifier *interval_sub;
459 	LLIST_HEAD(finish_passes);
460 	unsigned long cur_seq;
461 	int err = 0;
462 
463 	for (interval_sub =
464 		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
465 	     interval_sub;
466 	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
467 		bool ret;
468 
469 		if (interval_sub->ops->invalidate_start) {
470 			struct mmu_interval_notifier_finish *finish = NULL;
471 
472 			ret = interval_sub->ops->invalidate_start(interval_sub,
473 								  range,
474 								  cur_seq,
475 								  &finish);
476 			if (ret && finish) {
477 				finish->notifier = interval_sub;
478 				__llist_add(&finish->link, &finish_passes);
479 			}
480 
481 		} else {
482 			ret = interval_sub->ops->invalidate(interval_sub,
483 							    range,
484 							    cur_seq);
485 		}
486 		if (!ret) {
487 			if (WARN_ON(mmu_notifier_range_blockable(range)))
488 				continue;
489 			err = -EAGAIN;
490 			break;
491 		}
492 	}
493 
494 	mn_itree_finish_pass(&finish_passes);
495 
496 	/*
497 	 * On -EAGAIN the non-blocking caller is not allowed to call
498 	 * invalidate_range_end()
499 	 */
500 	if (err)
501 		mn_itree_inv_end(subscriptions);
502 
503 	return err;
504 }
505 
506 static int mn_hlist_invalidate_range_start(
507 	struct mmu_notifier_subscriptions *subscriptions,
508 	struct mmu_notifier_range *range)
509 {
510 	struct mmu_notifier *subscription;
511 	int ret = 0;
512 	int id;
513 
514 	id = srcu_read_lock(&srcu);
515 	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
516 				 srcu_read_lock_held(&srcu)) {
517 		const struct mmu_notifier_ops *ops = subscription->ops;
518 
519 		if (ops->invalidate_range_start) {
520 			int _ret;
521 
522 			if (!mmu_notifier_range_blockable(range))
523 				non_block_start();
524 			_ret = ops->invalidate_range_start(subscription, range);
525 			if (!mmu_notifier_range_blockable(range))
526 				non_block_end();
527 			if (_ret) {
528 				pr_info("%pS callback failed with %d in %sblockable context.\n",
529 					ops->invalidate_range_start, _ret,
530 					!mmu_notifier_range_blockable(range) ?
531 						"non-" :
532 						"");
533 				WARN_ON(mmu_notifier_range_blockable(range) ||
534 					_ret != -EAGAIN);
535 				/*
536 				 * We call all the notifiers on any EAGAIN;
537 				 * there is no way for a notifier to know if
538 				 * its start method failed, thus a start that
539 				 * does EAGAIN can't also do end.
540 				 */
541 				WARN_ON(ops->invalidate_range_end);
542 				ret = _ret;
543 			}
544 		}
545 	}
546 
547 	if (ret) {
548 		/*
549 		 * Must be non-blocking to get here.  If there are multiple
550 		 * notifiers and one or more failed start, any that succeeded
551 		 * start are expecting their end to be called.  Do so now.
552 		 */
553 		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
554 					 hlist, srcu_read_lock_held(&srcu)) {
555 			if (!subscription->ops->invalidate_range_end)
556 				continue;
557 
558 			subscription->ops->invalidate_range_end(subscription,
559 								range);
560 		}
561 	}
562 	srcu_read_unlock(&srcu, id);
563 
564 	return ret;
565 }
566 
567 int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
568 {
569 	struct mmu_notifier_subscriptions *subscriptions =
570 		range->mm->notifier_subscriptions;
571 	int ret;
572 
573 	if (subscriptions->has_itree) {
574 		ret = mn_itree_invalidate(subscriptions, range);
575 		if (ret)
576 			return ret;
577 	}
578 	if (!hlist_empty(&subscriptions->list))
579 		return mn_hlist_invalidate_range_start(subscriptions, range);
580 	return 0;
581 }
582 
583 static void
584 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
585 			struct mmu_notifier_range *range)
586 {
587 	struct mmu_notifier *subscription;
588 	int id;
589 
590 	id = srcu_read_lock(&srcu);
591 	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
592 				 srcu_read_lock_held(&srcu)) {
593 		if (subscription->ops->invalidate_range_end) {
594 			if (!mmu_notifier_range_blockable(range))
595 				non_block_start();
596 			subscription->ops->invalidate_range_end(subscription,
597 								range);
598 			if (!mmu_notifier_range_blockable(range))
599 				non_block_end();
600 		}
601 	}
602 	srcu_read_unlock(&srcu, id);
603 }
604 
605 void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
606 {
607 	struct mmu_notifier_subscriptions *subscriptions =
608 		range->mm->notifier_subscriptions;
609 
610 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
611 	if (subscriptions->has_itree)
612 		mn_itree_inv_end(subscriptions);
613 
614 	if (!hlist_empty(&subscriptions->list))
615 		mn_hlist_invalidate_end(subscriptions, range);
616 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
617 }
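
/*
 * Illustrative sketch only (not part of the kernel build): how a core mm
 * caller brackets PTE teardown with the two calls above, via the
 * mmu_notifier_invalidate_range_start()/end() wrappers from
 * include/linux/mmu_notifier.h. Callers normally fill the range with the
 * mmu_notifier_range_init() helper; the open-coded initializer here mirrors
 * mn_itree_release() and the event/flags chosen are an assumption.
 *
 *	struct mmu_notifier_range range = {
 *		.event = MMU_NOTIFY_UNMAP,
 *		.mm    = mm,
 *		.start = start,
 *		.end   = end,
 *		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
 *	};
 *
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear the PTEs and flush the TLB ...
 *	mmu_notifier_invalidate_range_end(&range);
 */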
618 
619 void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
620 					unsigned long start, unsigned long end)
621 {
622 	struct mmu_notifier *subscription;
623 	int id;
624 
625 	id = srcu_read_lock(&srcu);
626 	hlist_for_each_entry_rcu(subscription,
627 				 &mm->notifier_subscriptions->list, hlist,
628 				 srcu_read_lock_held(&srcu)) {
629 		if (subscription->ops->arch_invalidate_secondary_tlbs)
630 			subscription->ops->arch_invalidate_secondary_tlbs(
631 				subscription, mm,
632 				start, end);
633 	}
634 	srcu_read_unlock(&srcu, id);
635 }
636 
637 /*
638  * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
639  * write mode. A NULL subscription signals that the notifier is being
640  * registered for itree mode.
641  */
642 int __mmu_notifier_register(struct mmu_notifier *subscription,
643 			    struct mm_struct *mm)
644 {
645 	struct mmu_notifier_subscriptions *subscriptions = NULL;
646 	int ret;
647 
648 	mmap_assert_write_locked(mm);
649 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
650 
651 	/*
652 	 * Subsystems should only register for arch_invalidate_secondary_tlbs() or
653 	 * invalidate_range_start()/end() callbacks, not both.
654 	 */
655 	if (WARN_ON_ONCE(subscription &&
656 			 (subscription->ops->arch_invalidate_secondary_tlbs &&
657 			 (subscription->ops->invalidate_range_start ||
658 			  subscription->ops->invalidate_range_end))))
659 		return -EINVAL;
660 
661 	if (!mm->notifier_subscriptions) {
662 		/*
663 		 * kmalloc cannot be called under mm_take_all_locks(), but we
664 		 * know that mm->notifier_subscriptions can't change while we
665 		 * hold the write side of the mmap_lock.
666 		 */
667 		subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions);
668 		if (!subscriptions)
669 			return -ENOMEM;
670 
671 		INIT_HLIST_HEAD(&subscriptions->list);
672 		spin_lock_init(&subscriptions->lock);
673 		subscriptions->invalidate_seq = 2;
674 		subscriptions->itree = RB_ROOT_CACHED;
675 		init_waitqueue_head(&subscriptions->wq);
676 		INIT_HLIST_HEAD(&subscriptions->deferred_list);
677 	}
678 
679 	ret = mm_take_all_locks(mm);
680 	if (unlikely(ret))
681 		goto out_clean;
682 
683 	/*
684 	 * Serialize the update against mmu_notifier_unregister. A
685 	 * side note: mmu_notifier_release can't run concurrently with
686 	 * us because we hold the mm_users pin (either implicitly as
687 	 * current->mm or explicitly with get_task_mm() or similar).
688 	 * We can't race against any other mmu notifier method either
689 	 * thanks to mm_take_all_locks().
690 	 *
691 	 * release semantics on the initialization of the
692 	 * mmu_notifier_subscriptions's contents are provided for unlocked
693 	 * readers.  acquire can only be used while holding the mmgrab or
694 	 * mmget, and is safe because once created the
695 	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
696 	 * As above, users holding the mmap_lock or one of the
697 	 * mm_take_all_locks() do not need to use acquire semantics.
698 	 */
699 	if (subscriptions)
700 		smp_store_release(&mm->notifier_subscriptions, subscriptions);
701 
702 	if (subscription) {
703 		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
704 		mmgrab(mm);
705 		subscription->mm = mm;
706 		subscription->users = 1;
707 
708 		spin_lock(&mm->notifier_subscriptions->lock);
709 		hlist_add_head_rcu(&subscription->hlist,
710 				   &mm->notifier_subscriptions->list);
711 		spin_unlock(&mm->notifier_subscriptions->lock);
712 	} else
713 		mm->notifier_subscriptions->has_itree = true;
714 
715 	mm_drop_all_locks(mm);
716 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
717 	return 0;
718 
719 out_clean:
720 	kfree(subscriptions);
721 	return ret;
722 }
723 EXPORT_SYMBOL_GPL(__mmu_notifier_register);
724 
725 /**
726  * mmu_notifier_register - Register a notifier on a mm
727  * @subscription: The notifier to attach
728  * @mm: The mm to attach the notifier to
729  *
730  * Must not hold mmap_lock nor any other VM related lock when calling
731  * this registration function. Must also ensure mm_users can't go down
732  * to zero while this runs to avoid races with mmu_notifier_release,
733  * so mm has to be current->mm or the mm should be pinned safely such
734  * as with get_task_mm(). If the mm is not current->mm, the mm_users
735  * pin should be released by calling mmput after mmu_notifier_register
736  * returns.
737  *
738  * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
739  * unregister the notifier.
740  *
741  * While the caller holds a mmu_notifier 'get' (a reference), the subscription->mm
742  * pointer remains valid and can be converted to an active mm via mmget_not_zero().
743  */
744 int mmu_notifier_register(struct mmu_notifier *subscription,
745 			  struct mm_struct *mm)
746 {
747 	int ret;
748 
749 	mmap_write_lock(mm);
750 	ret = __mmu_notifier_register(subscription, mm);
751 	mmap_write_unlock(mm);
752 	return ret;
753 }
754 EXPORT_SYMBOL_GPL(mmu_notifier_register);
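
/*
 * Illustrative sketch only (not part of the kernel build): a typical
 * register/unregister pairing. my_notifier and my_ops are hypothetical
 * caller-side names; the caller must hold an mm_users reference, e.g. by
 * operating on current->mm as below.
 *
 *	my_notifier.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_notifier, current->mm);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_notifier_unregister(&my_notifier, current->mm);
 */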
755 
756 static struct mmu_notifier *
757 find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
758 {
759 	struct mmu_notifier *subscription;
760 
761 	spin_lock(&mm->notifier_subscriptions->lock);
762 	hlist_for_each_entry_rcu(subscription,
763 				 &mm->notifier_subscriptions->list, hlist,
764 				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
765 		if (subscription->ops != ops)
766 			continue;
767 
768 		if (likely(subscription->users != UINT_MAX))
769 			subscription->users++;
770 		else
771 			subscription = ERR_PTR(-EOVERFLOW);
772 		spin_unlock(&mm->notifier_subscriptions->lock);
773 		return subscription;
774 	}
775 	spin_unlock(&mm->notifier_subscriptions->lock);
776 	return NULL;
777 }
778 
779 /**
780  * mmu_notifier_get_locked - Return the single struct mmu_notifier for
781  *                           the mm & ops
782  * @ops: The operations struct being subscribed with
783  * @mm: The mm to attach notifiers to
784  *
785  * This function either allocates a new mmu_notifier via
786  * ops->alloc_notifier(), or returns an already existing notifier on the
787  * list. The value of the ops pointer is used to determine when two notifiers
788  * are the same.
789  *
790  * Each call to mmu_notifier_get() must be paired with a call to
791  * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
792  *
793  * While the caller holds a mmu_notifier 'get' (a reference), the mm pointer will
794  * remain valid and can be converted to an active mm pointer via mmget_not_zero().
795  */
796 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
797 					     struct mm_struct *mm)
798 {
799 	struct mmu_notifier *subscription;
800 	int ret;
801 
802 	mmap_assert_write_locked(mm);
803 
804 	if (mm->notifier_subscriptions) {
805 		subscription = find_get_mmu_notifier(mm, ops);
806 		if (subscription)
807 			return subscription;
808 	}
809 
810 	subscription = ops->alloc_notifier(mm);
811 	if (IS_ERR(subscription))
812 		return subscription;
813 	subscription->ops = ops;
814 	ret = __mmu_notifier_register(subscription, mm);
815 	if (ret)
816 		goto out_free;
817 	return subscription;
818 out_free:
819 	subscription->ops->free_notifier(subscription);
820 	return ERR_PTR(ret);
821 }
822 EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
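
/*
 * Illustrative sketch only (not part of the kernel build): the get/put flow,
 * assuming a hypothetical driver_ops with .alloc_notifier and .free_notifier
 * set. The matching mmu_notifier_synchronize() belongs in the module's __exit
 * path, see mmu_notifier_put().
 *
 *	mmap_write_lock(mm);
 *	subscription = mmu_notifier_get_locked(&driver_ops, mm);
 *	mmap_write_unlock(mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	...
 *	mmu_notifier_put(subscription);
 */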
823 
824 /* this is called after the last mmu_notifier_unregister() returned */
825 void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
826 {
827 	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
828 	kfree(mm->notifier_subscriptions);
829 	mm->notifier_subscriptions = LIST_POISON1; /* debug */
830 }
831 
832 /*
833  * This releases the mm_count pin automatically and frees the mm
834  * structure if it was the last user of it. It serializes against
835  * running mmu notifiers with SRCU and against mmu_notifier_unregister
836  * with the unregister lock + SRCU. All sptes must be dropped before
837  * calling mmu_notifier_unregister. ->release or any other notifier
838  * method may be invoked concurrently with mmu_notifier_unregister,
839  * and only after mmu_notifier_unregister returned we're guaranteed
840  * that ->release or any other method can't run anymore.
841  */
842 void mmu_notifier_unregister(struct mmu_notifier *subscription,
843 			     struct mm_struct *mm)
844 {
845 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
846 
847 	if (!hlist_unhashed(&subscription->hlist)) {
848 		/*
849 		 * SRCU here will force exit_mmap to wait for ->release to
850 		 * finish before freeing the pages.
851 		 */
852 		int id;
853 
854 		id = srcu_read_lock(&srcu);
855 		/*
856 		 * exit_mmap will block in mmu_notifier_release to guarantee
857 		 * that ->release is called before freeing the pages.
858 		 */
859 		if (subscription->ops->release)
860 			subscription->ops->release(subscription, mm);
861 		srcu_read_unlock(&srcu, id);
862 
863 		spin_lock(&mm->notifier_subscriptions->lock);
864 		/*
865 		 * Can not use list_del_rcu() since __mmu_notifier_release
866 		 * can delete it before we hold the lock.
867 		 */
868 		hlist_del_init_rcu(&subscription->hlist);
869 		spin_unlock(&mm->notifier_subscriptions->lock);
870 	}
871 
872 	/*
873 	 * Wait for any running method to finish, of course including
874 	 * ->release if it was run by mmu_notifier_release instead of us.
875 	 */
876 	synchronize_srcu(&srcu);
877 
878 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
879 
880 	mmdrop(mm);
881 }
882 EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
883 
884 static void mmu_notifier_free_rcu(struct rcu_head *rcu)
885 {
886 	struct mmu_notifier *subscription =
887 		container_of(rcu, struct mmu_notifier, rcu);
888 	struct mm_struct *mm = subscription->mm;
889 
890 	subscription->ops->free_notifier(subscription);
891 	/* Pairs with the get in __mmu_notifier_register() */
892 	mmdrop(mm);
893 }
894 
895 /**
896  * mmu_notifier_put - Release the reference on the notifier
897  * @subscription: The notifier to act on
898  *
899  * This function must be paired with each mmu_notifier_get(); it releases the
900  * reference obtained by the get. If this is the last reference then the work
901  * to free the notifier is run asynchronously.
902  *
903  * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
904  * when the mm_struct is destroyed. Instead free_notifier is always called to
905  * release any resources held by the user.
906  *
907  * As ops->release is not guaranteed to be called, the user must ensure that
908  * all sptes are dropped, and no new sptes can be established before
909  * mmu_notifier_put() is called.
910  *
911  * This function can be called from the ops->release callback, however the
912  * caller must still ensure it is called pairwise with mmu_notifier_get().
913  *
914  * Modules calling this function must call mmu_notifier_synchronize() in
915  * their __exit functions to ensure the async work is completed.
916  */
917 void mmu_notifier_put(struct mmu_notifier *subscription)
918 {
919 	struct mm_struct *mm = subscription->mm;
920 
921 	spin_lock(&mm->notifier_subscriptions->lock);
922 	if (WARN_ON(!subscription->users) || --subscription->users)
923 		goto out_unlock;
924 	hlist_del_init_rcu(&subscription->hlist);
925 	spin_unlock(&mm->notifier_subscriptions->lock);
926 
927 	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
928 	return;
929 
930 out_unlock:
931 	spin_unlock(&mm->notifier_subscriptions->lock);
932 }
933 EXPORT_SYMBOL_GPL(mmu_notifier_put);
934 
935 static int __mmu_interval_notifier_insert(
936 	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
937 	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
938 	unsigned long length, const struct mmu_interval_notifier_ops *ops)
939 {
940 	interval_sub->mm = mm;
941 	interval_sub->ops = ops;
942 	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
943 	interval_sub->interval_tree.start = start;
944 	/*
945 	 * Note that the representation of the intervals in the interval tree
946 	 * considers the ending point as contained in the interval.
947 	 */
948 	if (length == 0 ||
949 	    check_add_overflow(start, length - 1,
950 			       &interval_sub->interval_tree.last))
951 		return -EOVERFLOW;
952 
953 	/* Must call with a mmget() held */
954 	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
955 		return -EINVAL;
956 
957 	/* pairs with mmdrop in mmu_interval_notifier_remove() */
958 	mmgrab(mm);
959 
960 	/*
961 	 * If some invalidate_range_start/end region is going on in parallel
962 	 * we don't know what VA ranges are affected, so we must assume this
963 	 * new range is included.
964 	 *
965 	 * If the itree is invalidating then we are not allowed to change
966 	 * it. Retrying until invalidation is done is tricky due to the
967 	 * possibility for live lock, instead defer the add to
968 	 * mn_itree_inv_end() so this algorithm is deterministic.
969 	 *
970 	 * In all cases the value for the interval_sub->invalidate_seq should be
971 	 * odd, see mmu_interval_read_begin()
972 	 */
973 	spin_lock(&subscriptions->lock);
974 	if (subscriptions->active_invalidate_ranges) {
975 		if (mn_itree_is_invalidating(subscriptions))
976 			hlist_add_head(&interval_sub->deferred_item,
977 				       &subscriptions->deferred_list);
978 		else {
979 			subscriptions->invalidate_seq |= 1;
980 			interval_tree_insert(&interval_sub->interval_tree,
981 					     &subscriptions->itree);
982 		}
983 		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
984 	} else {
985 		WARN_ON(mn_itree_is_invalidating(subscriptions));
986 		/*
987 		 * The starting seq for a subscription not under invalidation
988 		 * should be odd, not equal to the current invalidate_seq and
989 		 * invalidate_seq should not 'wrap' to the new seq any time
990 		 * soon.
991 		 */
992 		interval_sub->invalidate_seq =
993 			subscriptions->invalidate_seq - 1;
994 		interval_tree_insert(&interval_sub->interval_tree,
995 				     &subscriptions->itree);
996 	}
997 	spin_unlock(&subscriptions->lock);
998 	return 0;
999 }
1000 
1001 /**
1002  * mmu_interval_notifier_insert - Insert an interval notifier
1003  * @interval_sub: Interval subscription to register
1004  * @start: Starting virtual address to monitor
1005  * @length: Length of the range to monitor
1006  * @mm: mm_struct to attach to
1007  * @ops: Interval notifier operations to be called on matching events
1008  *
1009  * This function subscribes the interval notifier for notifications from the
1010  * mm.  Upon return the ops related to mmu_interval_notifier will be called
1011  * whenever an event that intersects with the given range occurs.
1012  *
1013  * Upon return the interval notifier may not be present in the interval tree yet.
1014  * The caller must use the normal interval notifier read flow via
1015  * mmu_interval_read_begin() to establish SPTEs for this range.
1016  */
1017 int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
1018 				 struct mm_struct *mm, unsigned long start,
1019 				 unsigned long length,
1020 				 const struct mmu_interval_notifier_ops *ops)
1021 {
1022 	struct mmu_notifier_subscriptions *subscriptions;
1023 	int ret;
1024 
1025 	WARN_ON_ONCE(ops->invalidate_start && !ops->invalidate_finish);
1026 	might_lock(&mm->mmap_lock);
1027 
1028 	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
1029 	if (!subscriptions || !subscriptions->has_itree) {
1030 		ret = mmu_notifier_register(NULL, mm);
1031 		if (ret)
1032 			return ret;
1033 		subscriptions = mm->notifier_subscriptions;
1034 	}
1035 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1036 					      start, length, ops);
1037 }
1038 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
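
/*
 * Illustrative sketch only (not part of the kernel build): pairing insert and
 * remove around a monitored VA range. my_interval_ops is a hypothetical ops
 * table; the mm must have a valid mm_users reference when inserting.
 *
 *	ret = mmu_interval_notifier_insert(&interval_sub, mm, start, length,
 *					   &my_interval_ops);
 *	if (ret)
 *		return ret;
 *	... use mmu_interval_read_begin()/mmu_interval_read_retry() ...
 *	mmu_interval_notifier_remove(&interval_sub);
 */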
1039 
1040 int mmu_interval_notifier_insert_locked(
1041 	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
1042 	unsigned long start, unsigned long length,
1043 	const struct mmu_interval_notifier_ops *ops)
1044 {
1045 	struct mmu_notifier_subscriptions *subscriptions =
1046 		mm->notifier_subscriptions;
1047 	int ret;
1048 
1049 	mmap_assert_write_locked(mm);
1050 
1051 	if (!subscriptions || !subscriptions->has_itree) {
1052 		ret = __mmu_notifier_register(NULL, mm);
1053 		if (ret)
1054 			return ret;
1055 		subscriptions = mm->notifier_subscriptions;
1056 	}
1057 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1058 					      start, length, ops);
1059 }
1060 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
1061 
1062 static bool
1063 mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
1064 			  unsigned long seq)
1065 {
1066 	bool ret;
1067 
1068 	spin_lock(&subscriptions->lock);
1069 	ret = subscriptions->invalidate_seq != seq;
1070 	spin_unlock(&subscriptions->lock);
1071 	return ret;
1072 }
1073 
1074 /**
1075  * mmu_interval_notifier_remove - Remove an interval notifier
1076  * @interval_sub: Interval subscription to unregister
1077  *
1078  * This function must be paired with mmu_interval_notifier_insert(). It cannot
1079  * be called from any ops callback.
1080  *
1081  * Once this returns ops callbacks are no longer running on other CPUs and
1082  * will not be called in the future.
1083  */
1084 void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
1085 {
1086 	struct mm_struct *mm = interval_sub->mm;
1087 	struct mmu_notifier_subscriptions *subscriptions =
1088 		mm->notifier_subscriptions;
1089 	unsigned long seq = 0;
1090 
1091 	might_sleep();
1092 
1093 	spin_lock(&subscriptions->lock);
1094 	if (mn_itree_is_invalidating(subscriptions)) {
1095 		/*
1096 		 * remove is being called after insert placed this subscription on the
1097 		 * deferred list, but before the deferred list was processed.
1098 		 */
1099 		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
1100 			hlist_del(&interval_sub->deferred_item);
1101 		} else {
1102 			hlist_add_head(&interval_sub->deferred_item,
1103 				       &subscriptions->deferred_list);
1104 			seq = subscriptions->invalidate_seq;
1105 		}
1106 	} else {
1107 		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
1108 		interval_tree_remove(&interval_sub->interval_tree,
1109 				     &subscriptions->itree);
1110 	}
1111 	spin_unlock(&subscriptions->lock);
1112 
1113 	/*
1114 	 * The possible sleep waiting for the invalidation to progress requires
1115 	 * that the caller not hold any locks taken by the invalidation callbacks.
1116 	 */
1117 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
1118 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
1119 	if (seq)
1120 		wait_event(subscriptions->wq,
1121 			   mmu_interval_seq_released(subscriptions, seq));
1122 
1123 	/* pairs with mmgrab in mmu_interval_notifier_insert() */
1124 	mmdrop(mm);
1125 }
1126 EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
1127 
1128 /**
1129  * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
1130  *
1131  * This function ensures that all outstanding async SRCU work from
1132  * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
1133  * associated with an unused mmu_notifier will no longer be called.
1134  *
1135  * Before calling this, the caller must ensure that all of its mmu_notifiers
1136  * have been fully released via mmu_notifier_put().
1137  *
1138  * Modules using the mmu_notifier_put() API should call this in their __exit
1139  * function to avoid module unloading races.
1140  */
1141 void mmu_notifier_synchronize(void)
1142 {
1143 	synchronize_srcu(&srcu);
1144 }
1145 EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
1146