1 /* SPDX-License-Identifier: GPL-2.0 */
18 * enum mmu_notifier_event - reason for the mmu notifier callback
68 * freed. This can run concurrently with other mmu notifier
69 * methods (the ones invoked outside the mm context) and it
70 * should tear down all secondary mmu mappings and freeze the
71 * secondary mmu. If this method isn't implemented you have to
72 * be sure that nothing could possibly write to the pages
73 * through the secondary mmu by the time the last thread with
74 * tsk->mm == mm exits.
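A minimal sketch of a subscriber that freezes its secondary MMU from ->release. Here struct drv_data, its lock, and drv_teardown_all_sptes() are hypothetical names for illustration; only mmu_notifier_register() and the ->release signature come from this header:

        struct drv_data {
                struct mmu_notifier notifier;
                spinlock_t lock;
        };

        static void drv_release(struct mmu_notifier *subscription,
                                struct mm_struct *mm)
        {
                struct drv_data *drv = container_of(subscription,
                                                    struct drv_data, notifier);

                /* After this returns, nothing may reach the pages via our MMU. */
                spin_lock(&drv->lock);
                drv_teardown_all_sptes(drv);
                spin_unlock(&drv->lock);
        }

        static const struct mmu_notifier_ops drv_ops = {
                .release = drv_release,
        };

Registration is then drv->notifier.ops = &drv_ops; followed by mmu_notifier_register(&drv->notifier, current->mm).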
76 * As a side note: the pages freed after ->release returns could
77 * be immediately reallocated by the gart at an alias physical
78 * address with a different cache model, so if ->release isn't
79 * implemented because all _software_ driven memory accesses
80 * through the secondary mmu are terminated by the time the
81 * last thread of this mm quits, you also have to be sure that
82 * speculative _hardware_ operations can't allocate dirty
93 * test-and-clearing the young/accessed bitflag in the
95 * accesses to the page through the secondary MMUs and not
97 * Start-end is necessary in case the secondary MMU is mapping the page
107 * latter, it is supposed to test-and-clear the young/accessed bitflag
108 * in the secondary pte, but it may omit flushing the secondary tlb.
117 * the secondary pte. This is used to know if the page is
119 * down the secondary mapping on the page.
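A sketch of how the three aging callbacks divide the work. The callback signatures are the ones declared in this header; drv_age_range(), drv_flush_secondary_tlb() and drv_page_is_young() are hypothetical:

        static int drv_clear_flush_young(struct mmu_notifier *subscription,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
        {
                /* Test-and-clear accessed bits over [start, end) ... */
                int young = drv_age_range(subscription, start, end);

                /* ... and flush the secondary TLB, as the name promises. */
                drv_flush_secondary_tlb(subscription, start, end);
                return young;
        }

        static int drv_clear_young(struct mmu_notifier *subscription,
                                   struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
        {
                /* Same, but the secondary TLB flush may be omitted. */
                return drv_age_range(subscription, start, end);
        }

        static int drv_test_young(struct mmu_notifier *subscription,
                                  struct mm_struct *mm, unsigned long address)
        {
                /* Read-only check: do not clear anything, do not flush. */
                return drv_page_is_young(subscription, address);
        }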
138 * can't guarantee that no additional references are taken to
168 * invalidate_range_start() then the VM can free pages as page
172 * any secondary tlb before doing the final free on the
178 * sleep and has to return with -EAGAIN if sleeping would be required.
179 * 0 should be returned otherwise. Please note that notifiers that can
180 * fail invalidate_range_start are not allowed to implement
181 * invalidate_range_end, as there is no mechanism for informing the
182 * notifier that its start failed.
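The -EAGAIN contract can be sketched like this, assuming a hypothetical per-driver mutex and unmap helper; mmu_notifier_range_blockable() is the inline defined further down in this header. Per the text above, a notifier that can fail here must not implement invalidate_range_end():

        static int drv_invalidate_range_start(struct mmu_notifier *subscription,
                                              const struct mmu_notifier_range *range)
        {
                struct drv_data *drv = container_of(subscription,
                                                    struct drv_data, notifier);

                if (mmu_notifier_range_blockable(range))
                        mutex_lock(&drv->mutex);
                else if (!mutex_trylock(&drv->mutex))
                        return -EAGAIN; /* nonblocking caller: ask it to back off */

                drv_unmap_range(drv, range->start, range->end);
                mutex_unlock(&drv->mutex);
                return 0;
        }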
190 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
191 * which shares page-tables with the CPU. The
197 * holding the ptl spin-lock and therefore this callback is not allowed
201 * entry. It is assumed that any secondary TLB has the same rules for
203 * code will need to call this explicitly when required for secondary
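Because it runs under the ptl spin-lock, an implementation must stay atomic. A sketch, assuming the callback signature mirrors the other range callbacks in this struct; drv_flush_iotlb_atomic() is a hypothetical non-sleeping flush:

        static void drv_arch_invalidate_secondary_tlbs(struct mmu_notifier *subscription,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
        {
                /* ptl is held: no mutexes, no allocation, no sleeping. */
                drv_flush_iotlb_atomic(subscription, start, end);
        }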
231 * Therefore notifier chains can only be traversed when either
232 *
233 * 1. mmap_lock is held.
234 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
235 * 3. No other concurrent thread can access the list (release).
248 * range. This function can sleep. Return false only if sleeping
282 return unlikely(mm->notifier_subscriptions); in mm_has_notifiers()
320 * mmu_interval_set_seq - Save the invalidation sequence
321 * @interval_sub: The subscription passed to invalidate
322 * @cur_seq: The cur_seq passed to the invalidate() callback
336 WRITE_ONCE(interval_sub->invalidate_seq, cur_seq); in mmu_interval_set_seq()
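The usual shape of an interval-notifier invalidate() callback, pairing mmu_interval_set_seq() with the driver lock. struct drv_bo, its lock, and drv_zap_mapping() are hypothetical:

        static bool drv_interval_invalidate(struct mmu_interval_notifier *interval_sub,
                                            const struct mmu_notifier_range *range,
                                            unsigned long cur_seq)
        {
                struct drv_bo *bo = container_of(interval_sub, struct drv_bo, notifier);

                if (mmu_notifier_range_blockable(range))
                        mutex_lock(&bo->lock);
                else if (!mutex_trylock(&bo->lock))
                        return false;   /* tell the core we would have to sleep */

                mmu_interval_set_seq(interval_sub, cur_seq);
                drv_zap_mapping(bo, range->start, range->end);
                mutex_unlock(&bo->lock);
                return true;
        }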
340 * mmu_interval_read_retry - End a read side critical section against a VA range
345 * unconditionally by ops->invalidate() when it calls mmu_interval_set_seq().
357 return interval_sub->invalidate_seq != seq; in mmu_interval_read_retry()
361 * mmu_interval_check_retry - Test if a collision has occurred
365 * This can be used in the critical section between mmu_interval_read_begin()
371 * occurred. It can be called many times and does not have to hold the user
374 * This call can be used as part of loops and other expensive operations to
382 return READ_ONCE(interval_sub->invalidate_seq) != seq; in mmu_interval_check_retry()
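Putting the read side together, the retry loop looks roughly like this. drv_fault_pages() and drv_program_device_ptes() are hypothetical; the begin/retry calls, and the requirement that retry runs under the same lock as invalidate(), are from this header:

        unsigned long seq;
        int ret;

again:
        seq = mmu_interval_read_begin(&bo->notifier);

        /* Expensive work done without the lock, e.g. faulting in pages. */
        ret = drv_fault_pages(bo);
        if (ret)
                return ret;

        mutex_lock(&bo->lock);
        if (mmu_interval_read_retry(&bo->notifier, seq)) {
                /* An invalidate() collided; throw away the work and redo. */
                mutex_unlock(&bo->lock);
                goto again;
        }
        drv_program_device_ptes(bo);
        mutex_unlock(&bo->lock);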
407 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE); in mmu_notifier_range_blockable()
455 if (mm_has_notifiers(range->mm)) { in mmu_notifier_invalidate_range_start()
456 range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE; in mmu_notifier_invalidate_range_start()
464 * can return an error if a notifier can't proceed without blocking, in which
465 * case you're not allowed to modify PTEs in the specified range.
475 if (mm_has_notifiers(range->mm)) { in mmu_notifier_invalidate_range_start_nonblock()
476 range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE; in mmu_notifier_invalidate_range_start_nonblock()
489 if (mm_has_notifiers(range->mm)) in mmu_notifier_invalidate_range_end()
502 mm->notifier_subscriptions = NULL; in mmu_notifier_subscriptions_init()
519 range->event = event; in mmu_notifier_range_init()
520 range->mm = mm; in mmu_notifier_range_init()
521 range->start = start; in mmu_notifier_range_init()
522 range->end = end; in mmu_notifier_range_init()
523 range->flags = flags; in mmu_notifier_range_init()
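For the caller side, a primary-MMU zap path brackets its page-table changes as below; zap_primary_ptes() is hypothetical, MMU_NOTIFY_CLEAR is one value of enum mmu_notifier_event, and the argument order matches the fields initialized above (the signature has shifted across kernel versions, so treat it as a sketch):

        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        zap_primary_ptes(mm, start, end);
        mmu_notifier_invalidate_range_end(&range);

Callers that cannot sleep, such as the OOM reaper, use mmu_notifier_invalidate_range_start_nonblock() instead and must abandon the operation if it returns -EAGAIN.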
533 range->owner = owner; in mmu_notifier_range_init_owner()
542 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
555 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
568 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
579 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
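These wrappers fold the secondary-MMU accessed bits into the primary result, so an aging pass sees references from either side. A sketch in the style of an rmap walk (the surrounding variables are assumed context):

        /* The page counts as referenced if either the CPU or a secondary
         * MMU touched it since the last aging pass. */
        if (ptep_clear_flush_young_notify(vma, address, ptep))
                referenced++;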
586 * This is safe to start by updating the secondary MMUs, because the primary MMU
587 * pte invalidate must have already happened with a ptep_clear_flush() before
588 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
589 * required when we change both the protection of the mapping from read-only to
590 * read-write and the pfn (like during copy on write page faults). Otherwise the
591 * old page would remain mapped read-only in the secondary MMUs after the new
592 * page is already writable by some CPU through the primary MMU.
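The ordering the comment describes, sketched in the shape of a copy-on-write fault. Helper availability and the exact pte construction vary across kernel versions (recent kernels removed set_pte_at_notify() entirely), so this is illustrative only:

        pte_t entry;

        /* Retire the old read-only pte from the primary MMU first ... */
        ptep_clear_flush(vma, address, ptep);

        /* ... then build the new writable pte; set_pte_at_notify() runs the
         * notifier before installing it, so no secondary MMU still maps
         * old_page read-only once new_page is writable. */
        entry = mk_pte(new_page, vma->vm_page_prot);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        set_pte_at_notify(mm, address, ptep, entry);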
615 range->start = start; in _mmu_notifier_range_init()
616 range->end = end; in _mmu_notifier_range_init()