Lines matching "mm" in linux/mm/mmu_notifier.c:
3 * linux/mm/mmu_notifier.c
13 #include <linux/mm.h>
19 #include <linux/sched/mm.h>
33 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
38 /* all mmu notifiers registered in this mm are queued in this list */
54 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
57 * Note that the core mm creates nested invalidate_range_start()/end() regions
60 * progress on the mm side.
67 * - mm->active_invalidate_ranges != 0
73 * - mm->active_invalidate_ranges != 0
173 * subscription. If the mm invokes invalidation during the critical section
189 interval_sub->mm->notifier_subscriptions; in mmu_interval_read_begin()
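The fragment above is from mmu_interval_read_begin(), the read side of the collision-retry lock described by the comments at file lines 54-73. A minimal sketch of the usual consumer, a driver fault path; struct my_dev, its fault_lock and program_dev_ptes() are invented for illustration, while the begin/retry calls are the real API:

    static int my_dev_fault(struct my_dev *dev, unsigned long addr)
    {
        unsigned long seq;

        for (;;) {
            seq = mmu_interval_read_begin(&dev->interval_sub);

            /* Walk the CPU page tables for addr here without the
             * driver lock held (e.g. via hmm_range_fault()). */

            spin_lock(&dev->fault_lock);
            if (mmu_interval_read_retry(&dev->interval_sub, seq)) {
                /* Collided with an invalidation; walk again. */
                spin_unlock(&dev->fault_lock);
                continue;
            }
            program_dev_ptes(dev, addr);    /* install secondary PTEs */
            spin_unlock(&dev->fault_lock);
            return 0;
        }
    }

The same fault_lock must be held by the interval notifier's ->invalidate callback around mmu_interval_set_seq(), or mmu_interval_read_retry() cannot close the race.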
262 struct mm_struct *mm) in mn_itree_release()
267 .mm = mm, in mn_itree_release()
289 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
291 * in parallel despite there being no task using this mm any more,
300 struct mm_struct *mm) in mn_hlist_release()
316 * sptes before all the pages in the mm are freed. in mn_hlist_release()
319 subscription->ops->release(subscription, mm); in mn_hlist_release()
338 * exit_mmap (which would proceed with freeing all pages in the mm) in mn_hlist_release()
348 void __mmu_notifier_release(struct mm_struct *mm) in __mmu_notifier_release()
351 mm->notifier_subscriptions; in __mmu_notifier_release()
354 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
357 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
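__mmu_notifier_release() fans out to every subscription's ->release() from exit_mmap(); as the comment at file line 316 says, this is the last chance to tear down secondary PTEs before the pages are freed. A sketch of such a callback, with my_notifier and my_dev_stop() as illustrative names:

    static void my_release(struct mmu_notifier *subscription,
                           struct mm_struct *mm)
    {
        struct my_notifier *mn =
            container_of(subscription, struct my_notifier, sub);

        /* After this returns the device must never touch this mm
         * again; exit_mmap() goes on to free all of its pages. */
        my_dev_stop(mn->dev);
    }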
365 int __mmu_notifier_clear_flush_young(struct mm_struct *mm, in __mmu_notifier_clear_flush_young()
374 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_flush_young()
378 subscription, mm, start, end); in __mmu_notifier_clear_flush_young()
385 int __mmu_notifier_clear_young(struct mm_struct *mm, in __mmu_notifier_clear_young()
394 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_clear_young()
398 mm, start, end); in __mmu_notifier_clear_young()
405 int __mmu_notifier_test_young(struct mm_struct *mm, in __mmu_notifier_test_young()
413 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_test_young()
416 young = subscription->ops->test_young(subscription, mm, in __mmu_notifier_test_young()
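File lines 365-416 are the three page-aging hooks; each walks the subscription list and ORs the results together, a non-zero return meaning the secondary MMU saw an access. Hedged sketches of the two common ops (the my_dev_* helpers are invented):

    static int my_clear_flush_young(struct mmu_notifier *sub,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
        /* Clear accessed bits for [start, end), flush the secondary
         * TLB, and report whether any bit was set. */
        return my_dev_harvest_accessed(sub, start, end);
    }

    static int my_test_young(struct mmu_notifier *sub,
                             struct mm_struct *mm, unsigned long address)
    {
        /* Non-destructive variant used by reclaim heuristics. */
        return my_dev_accessed_bit(sub, address);
    }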
427 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, in __mmu_notifier_change_pte()
435 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_change_pte()
438 subscription->ops->change_pte(subscription, mm, address, in __mmu_notifier_change_pte()
539 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_start()
577 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_end()
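Every core-mm invalidation is bracketed by these start/end calls, and ->invalidate_range_start() may refuse with -EAGAIN only when the range is non-blockable (the OOM-reaper path). A sketch honoring that rule; my_dev_zap() is invented:

    static int my_invalidate_range_start(struct mmu_notifier *sub,
                                         const struct mmu_notifier_range *range)
    {
        if (!mmu_notifier_range_blockable(range))
            return -EAGAIN;    /* must not sleep here */

        /* Shoot down device PTEs for [range->start, range->end) and
         * hold off new secondary faults until invalidate_range_end(). */
        my_dev_zap(sub, range->start, range->end);
        return 0;
    }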
588 void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, in __mmu_notifier_arch_invalidate_secondary_tlbs()
596 &mm->notifier_subscriptions->list, hlist, in __mmu_notifier_arch_invalidate_secondary_tlbs()
600 subscription, mm, in __mmu_notifier_arch_invalidate_secondary_tlbs()
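arch_invalidate_secondary_tlbs() serves hardware that shares the CPU page tables (IOMMU SVA style) and therefore only needs TLB shootdown, not PTE teardown. It can be reached from TLB-flush paths that may hold page-table locks, so sleeping here would be a bug. A sketch with an invented my_iommu_flush_range():

    static void my_arch_invalidate_secondary_tlbs(struct mmu_notifier *sub,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
    {
        /* Atomic context: just flush the device TLB for the range. */
        my_iommu_flush_range(sub, start, end);
    }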
612 struct mm_struct *mm) in __mmu_notifier_register()
617 mmap_assert_write_locked(mm); in __mmu_notifier_register()
618 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
630 if (!mm->notifier_subscriptions) { in __mmu_notifier_register()
633 * know that mm->notifier_subscriptions can't change while we in __mmu_notifier_register()
649 ret = mm_take_all_locks(mm); in __mmu_notifier_register()
657 * current->mm or explicitly with get_task_mm() or similar). in __mmu_notifier_register()
665 * mmu_notifier_subscriptions is not freed until the mm is destroyed. in __mmu_notifier_register()
670 smp_store_release(&mm->notifier_subscriptions, subscriptions); in __mmu_notifier_register()
674 mmgrab(mm); in __mmu_notifier_register()
675 subscription->mm = mm; in __mmu_notifier_register()
678 spin_lock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
680 &mm->notifier_subscriptions->list); in __mmu_notifier_register()
681 spin_unlock(&mm->notifier_subscriptions->lock); in __mmu_notifier_register()
683 mm->notifier_subscriptions->has_itree = true; in __mmu_notifier_register()
685 mm_drop_all_locks(mm); in __mmu_notifier_register()
686 BUG_ON(atomic_read(&mm->mm_users) <= 0); in __mmu_notifier_register()
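Note the publication protocol in the fragments above: the structure is installed with smp_store_release() (file line 670) while mm_take_all_locks() (line 649) excludes every other writer; the matching unlocked reader appears later in this listing at line 997. The pairing, reduced to its two real statements:

    /* writer, __mmu_notifier_register(), under mm_take_all_locks(): */
    smp_store_release(&mm->notifier_subscriptions, subscriptions);

    /* unlocked reader, mmu_interval_notifier_insert(): */
    subscriptions = smp_load_acquire(&mm->notifier_subscriptions);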
696 * mmu_notifier_register - Register a notifier on a mm
698 * @mm: The mm to attach the notifier to
703 * so mm has to be current->mm or the mm should be pinned safely such
704 * as with get_task_mm(). If the mm is not current->mm, the mm_users
711 * While the caller has a mmu_notifier get, the subscription->mm pointer will remain
712 * valid, and can be converted to an active mm pointer via mmget_not_zero().
715 struct mm_struct *mm) in mmu_notifier_register()
719 mmap_write_lock(mm); in mmu_notifier_register()
720 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_register()
721 mmap_write_unlock(mm); in mmu_notifier_register()
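As the kernel-doc above requires, mm_users must be elevated across registration, which is automatic when a task registers against its own mm. A minimal sketch; struct my_notifier (embedding struct mmu_notifier as ->sub) and the ops shown earlier are driver-side inventions:

    static const struct mmu_notifier_ops my_ops = {
        .release                = my_release,
        .invalidate_range_start = my_invalidate_range_start,
    };

    static int my_attach_current(struct my_notifier *mn)
    {
        mn->sub.ops = &my_ops;
        return mmu_notifier_register(&mn->sub, current->mm);
    }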
727 find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops) in find_get_mmu_notifier()
731 spin_lock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
733 &mm->notifier_subscriptions->list, hlist, in find_get_mmu_notifier()
734 lockdep_is_held(&mm->notifier_subscriptions->lock)) { in find_get_mmu_notifier()
742 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
745 spin_unlock(&mm->notifier_subscriptions->lock); in find_get_mmu_notifier()
751 * the mm & ops
753 * @mm: The mm to attach notifiers to
761 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
763 * While the caller has a mmu_notifier get, the mm pointer will remain valid,
764 * and can be converted to an active mm pointer via mmget_not_zero().
767 struct mm_struct *mm) in mmu_notifier_get_locked()
772 mmap_assert_write_locked(mm); in mmu_notifier_get_locked()
774 if (mm->notifier_subscriptions) { in mmu_notifier_get_locked()
775 subscription = find_get_mmu_notifier(mm, ops); in mmu_notifier_get_locked()
780 subscription = ops->alloc_notifier(mm); in mmu_notifier_get_locked()
784 ret = __mmu_notifier_register(subscription, mm); in mmu_notifier_get_locked()
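The get/put flavour deduplicates subscriptions: find_get_mmu_notifier() returns an existing notifier with the same ops pointer, otherwise ops->alloc_notifier() builds a fresh one and mmu_notifier_get_locked() fills in ->ops itself. A sketch of the required allocator, reusing the invented struct my_notifier:

    static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
    {
        struct my_notifier *mn = kzalloc(sizeof(*mn), GFP_KERNEL);

        if (!mn)
            return ERR_PTR(-ENOMEM);
        /* ->ops is assigned by mmu_notifier_get_locked() on success. */
        return &mn->sub;
    }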
795 void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm) in __mmu_notifier_subscriptions_destroy()
797 BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list)); in __mmu_notifier_subscriptions_destroy()
798 kfree(mm->notifier_subscriptions); in __mmu_notifier_subscriptions_destroy()
799 mm->notifier_subscriptions = LIST_POISON1; /* debug */ in __mmu_notifier_subscriptions_destroy()
803 * This releases the mm_count pin automatically and frees the mm
813 struct mm_struct *mm) in mmu_notifier_unregister()
815 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
830 subscription->ops->release(subscription, mm); in mmu_notifier_unregister()
833 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
839 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_unregister()
848 BUG_ON(atomic_read(&mm->mm_count) <= 0); in mmu_notifier_unregister()
850 mmdrop(mm); in mmu_notifier_unregister()
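mmu_notifier_unregister() is the synchronous teardown path: it may run ->release() itself, serializes against in-flight callbacks with SRCU, drops the mm_count pin taken at file line 674, and must not be called from within ->release(). Usage is a single call (mn as in the earlier sketches):

    /* After this returns, no callback is still running and mn may
     * be freed by the caller. */
    mmu_notifier_unregister(&mn->sub, mm);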
858 struct mm_struct *mm = subscription->mm; in mmu_notifier_free_rcu()
862 mmdrop(mm); in mmu_notifier_free_rcu()
889 struct mm_struct *mm = subscription->mm; in mmu_notifier_put()
891 spin_lock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
895 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
901 spin_unlock(&mm->notifier_subscriptions->lock); in mmu_notifier_put()
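mmu_notifier_put() is the deferred counterpart for get_locked() users: the list removal happens under the spinlock shown above, while the actual free is punted to SRCU via mmu_notifier_free_rcu() (file line 858), which invokes ops->free_notifier() and then mmdrop()s the mm. The matching destructor is trivial:

    static void my_free_notifier(struct mmu_notifier *subscription)
    {
        /* Runs from (S)RCU callback context; nothing may sleep. */
        kfree(container_of(subscription, struct my_notifier, sub));
    }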
906 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in __mmu_interval_notifier_insert()
910 interval_sub->mm = mm; in __mmu_interval_notifier_insert()
924 if (WARN_ON(atomic_read(&mm->mm_users) <= 0)) in __mmu_interval_notifier_insert()
928 mmgrab(mm); in __mmu_interval_notifier_insert()
976 * @mm: mm_struct to attach to
980 * mm. Upon return the ops related to mmu_interval_notifier will be called
988 struct mm_struct *mm, unsigned long start, in mmu_interval_notifier_insert()
995 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
997 subscriptions = smp_load_acquire(&mm->notifier_subscriptions); in mmu_interval_notifier_insert()
999 ret = mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert()
1002 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert()
1004 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert()
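Interval notifiers bypass the global list and park a [start, start + length) range in the interval tree; note the unlocked smp_load_acquire() at file line 997 with a fallback to mmu_notifier_register(NULL, mm) that merely allocates the subscriptions structure. A sketch of an insertion together with the ->invalidate side that pairs with the fault loop shown earlier; struct my_dev, fault_lock and my_dev_zap() are still invented:

    static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
        struct my_dev *dev = container_of(interval_sub, struct my_dev,
                                          interval_sub);

        if (!mmu_notifier_range_blockable(range))
            return false;    /* core mm retries the invalidation */

        spin_lock(&dev->fault_lock);
        /* Publish the new sequence first so that a concurrent
         * mmu_interval_read_retry() is guaranteed to fail. */
        mmu_interval_set_seq(interval_sub, cur_seq);
        my_dev_zap(dev, range->start, range->end);
        spin_unlock(&dev->fault_lock);
        return true;
    }

    static const struct mmu_interval_notifier_ops my_interval_ops = {
        .invalidate = my_invalidate,
    };

    /* Watch [start, start + length) of the current task's mm: */
    err = mmu_interval_notifier_insert(&dev->interval_sub, current->mm,
                                       start, length, &my_interval_ops);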
1010 struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, in mmu_interval_notifier_insert_locked()
1015 mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1018 mmap_assert_write_locked(mm); in mmu_interval_notifier_insert_locked()
1021 ret = __mmu_notifier_register(NULL, mm); in mmu_interval_notifier_insert_locked()
1024 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1026 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert_locked()
1055 struct mm_struct *mm = interval_sub->mm; in mmu_interval_notifier_remove()
1057 mm->notifier_subscriptions; in mmu_interval_notifier_remove()
1093 mmdrop(mm); in mmu_interval_notifier_remove()
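mmu_interval_notifier_remove() waits for any in-flight invalidation covering the range to drain before the final mmdrop() at file line 1093, so it can sleep. Teardown is a single call:

    /* Blocks until concurrent ->invalidate calls are done; then the
     * embedding structure may be freed. */
    mmu_interval_notifier_remove(&dev->interval_sub);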