Lines matching refs: mmu_lock
760 * an emulated write will see the elevated count and acquire mmu_lock
863 * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
864 * operates with mmu_lock held for write), but rmaps can be walked without
865 * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
869 * done while holding mmu_lock for write. This allows a task walking rmaps
870 * without holding mmu_lock to concurrently walk the same entries as a task
871 * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
890 * do so while holding mmu_lock for write, and are mutually exclusive.
938 lockdep_assert_held_write(&kvm->mmu_lock);
960 lockdep_assert_held_write(&kvm->mmu_lock);
971 * If mmu_lock isn't held, rmaps can only be locked in read-only mode. The
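The excerpts above (863-890 and 938-971) describe a split scheme: the shadow MMU always mutates rmaps with mmu_lock held for write, while lockless walkers serialize only on a per-rmap lock taken in read-only mode. Below is a minimal kernel-style sketch of that kind of bit-spinlock arrangement, assuming the usual kernel bitops headers; the struct, the choice of lock bit, and the helper names are illustrative assumptions, not the file's actual rmap implementation.

/* Sketch only: one lock bit stolen from an otherwise-unused bit of the head. */
#define RMAP_SKETCH_LOCK_BIT	0	/* assumed free in this made-up layout */

struct rmap_head_sketch {
        unsigned long val;
};

/*
 * Writers call this while also holding mmu_lock for write; lockless readers
 * call it with no other lock, relying on the bit alone to serialize against
 * concurrent modification of this one rmap chain.
 */
static unsigned long rmap_lock_sketch(struct rmap_head_sketch *head)
{
        while (test_and_set_bit_lock(RMAP_SKETCH_LOCK_BIT, &head->val))
                cpu_relax();

        /* Hand back the head value with the lock bit masked off. */
        return head->val & ~BIT(RMAP_SKETCH_LOCK_BIT);
}

static void rmap_unlock_sketch(struct rmap_head_sketch *head)
{
        /* Release semantics pair with test_and_set_bit_lock() above. */
        clear_bit_unlock(RMAP_SKETCH_LOCK_BIT, &head->val);
}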
1591 lockdep_assert_held_write(&kvm->mmu_lock);
1601 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
1607 cond_resched_rwlock_write(&kvm->mmu_lock);
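The trio at 1591-1607 is the standard shape for a long walk under mmu_lock: assert the lock mode, poll need_resched() and rwlock_needbreak(), commit any pending work, then yield through cond_resched_rwlock_write(). A hedged sketch of that loop, assuming mmu.c's usual headers; sketch_zap_one_gfn() is a placeholder for the real per-gfn work, not a KVM function.

/* Placeholder, assumed to be provided elsewhere; returns true if a TLB flush is needed. */
bool sketch_zap_one_gfn(struct kvm *kvm, gfn_t gfn);

static void zap_range_sketch(struct kvm *kvm, gfn_t start, gfn_t end)
{
        bool flush = false;
        gfn_t gfn;

        lockdep_assert_held_write(&kvm->mmu_lock);

        for (gfn = start; gfn < end; gfn++) {
                flush |= sketch_zap_one_gfn(kvm, gfn);

                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                        /* Flush before the lock can be dropped and reacquired. */
                        if (flush) {
                                kvm_flush_remote_tlbs(kvm);
                                flush = false;
                        }
                        cond_resched_rwlock_write(&kvm->mmu_lock);
                }
        }

        if (flush)
                kvm_flush_remote_tlbs(kvm);
}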
1993 * mmu_lock when the TDP MMU is enabled, i.e. when the hash table of
2000 lockdep_assert_held(&kvm->mmu_lock);
2219 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2226 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2683 lockdep_assert_held_write(&kvm->mmu_lock);
2850 write_lock(&kvm->mmu_lock);
2861 write_unlock(&kvm->mmu_lock);
2875 * unnecessarily taking mmu_lock, e.g. if the gfn is write-tracked
2877 * mmu_lock is safe, as this is purely an optimization, i.e. a false
2890 write_lock(&kvm->mmu_lock);
2900 write_unlock(&kvm->mmu_lock);
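The fragments at 760 and 2875-2877, together with the lock pairs at 2850-2900, describe an optimization: the emulation path peeks at the count of shadowed page tables without mmu_lock and only takes the lock when the count is non-zero, with a memory barrier making the lockless peek safe. A hedged reconstruction of that shape follows; the field name and barrier placement follow the quoted comments, but treat the details as an approximation, not the file's exact code.

static void track_write_sketch(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        struct kvm *kvm = vcpu->kvm;

        /*
         * Pairs with a barrier on the accounting side (see the comment
         * around line 760): ensure the written value is visible before
         * the count is sampled, so either this task sees an elevated
         * count or the fault path sees the new value.
         */
        smp_mb();

        /* Lockless peek; a stale non-zero value just costs a write_lock(). */
        if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
                return;

        write_lock(&kvm->mmu_lock);
        /* ... re-walk the gpa's shadow pages and fix/zap them here ... */
        write_unlock(&kvm->mmu_lock);
}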
2957 * run with mmu_lock held for read, not write, and the unsync
2960 * no meaningful penalty if mmu_lock is held for write.
2970 * possible as clearing sp->unsync _must_ hold mmu_lock
2972 * while this CPU holds mmu_lock for read (or write).
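The excerpt at 2957-2972 leans on a basic rwlock property: a flag that is only ever cleared while mmu_lock is held for write cannot be cleared underneath a task that holds mmu_lock for read (or write). A hedged sketch of both sides of that contract; the sp->unsync handling here is simplified, not the file's actual sync logic.

/* Reader side: may run with mmu_lock held for read (or write). */
static bool sp_is_unsync_sketch(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        lockdep_assert_held(&kvm->mmu_lock);

        /*
         * The result is stable for the rest of this critical section:
         * clearing sp->unsync requires mmu_lock held for write, which
         * can't be acquired while any reader holds the lock.
         */
        return READ_ONCE(sp->unsync);
}

/* Writer side: clearing the flag demands exclusive ownership of mmu_lock. */
static void sp_clear_unsync_sketch(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        lockdep_assert_held_write(&kvm->mmu_lock);
        WRITE_ONCE(sp->unsync, false);
}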
3208 * consuming it. In this case, mmu_lock doesn't need to be held during the
3211 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3217 * not required to hold mmu_lock (though it's highly likely the caller will
3218 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
3348 * mmu_invalidate_retry() was successful and mmu_lock is held, so
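The comment at 3208-3218 lists holding mmu_lock plus "no in-progress MMU notifier invalidation" as one of the legal ways to consume a just-faulted-in pfn, and 3348 shows the success case. The usual shape is to snapshot kvm->mmu_invalidate_seq before resolving the pfn and to recheck it with mmu_invalidate_retry() once the lock is held. A hedged sketch follows; sketch_resolve_pfn() and sketch_install_spte() are placeholders (not KVM functions) and error handling is omitted.

/* Placeholders, assumed to be provided elsewhere. */
kvm_pfn_t sketch_resolve_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);	/* may sleep */
void sketch_install_spte(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);

static int install_pfn_sketch(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq;
        kvm_pfn_t pfn;

        /* Snapshot the invalidation sequence before touching the primary MMU. */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        pfn = sketch_resolve_pfn(vcpu, gfn);	/* no lock held */

        write_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /* An invalidation raced with the lookup; redo the fault. */
                write_unlock(&kvm->mmu_lock);
                return RET_PF_RETRY;
        }

        sketch_install_spte(vcpu, gfn, pfn);
        write_unlock(&kvm->mmu_lock);
        return RET_PF_FIXED;
}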
3535 * by setting the Writable bit, which can be done out of mmu_lock.
3667 * be made fully writable outside of mmu_lock, e.g. only SPTEs
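3535 and 3667 refer to the fast path that grants write access without taking mmu_lock at all; the enabling trick is an atomic compare-and-exchange on the SPTE, so any concurrent change (e.g. a zap) shows up as a cmpxchg failure. A hedged sketch of that technique; the bit name and writability policy are made up, the real rules live in the SPTE code.

#define SKETCH_SPTE_WRITABLE	BIT_ULL(1)	/* assumed bit, not KVM's layout */

static bool fast_make_writable_sketch(u64 *sptep)
{
        u64 old_spte = READ_ONCE(*sptep);
        u64 new_spte = old_spte | SKETCH_SPTE_WRITABLE;

        /*
         * No mmu_lock held: if anything else modified the SPTE since it
         * was read, the cmpxchg fails and the caller must retry or fall
         * back to the locked slow path.
         */
        return try_cmpxchg64(sptep, &old_spte, new_spte);
}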
3738 lockdep_assert_held_read(&kvm->mmu_lock);
3741 lockdep_assert_held_write(&kvm->mmu_lock);
3777 read_lock(&kvm->mmu_lock);
3779 write_lock(&kvm->mmu_lock);
3806 read_unlock(&kvm->mmu_lock);
3810 write_unlock(&kvm->mmu_lock);
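The assert/lock/unlock set at 3738-3810 is the idiom of taking mmu_lock for read when the fault is handled by the TDP MMU and for write otherwise, with matching lockdep asserts. A hedged sketch wrapping that choice in a helper pair; the is_tdp_mmu flag stands in for however the caller distinguishes the two paths.

static void mmu_lock_for_fault_sketch(struct kvm *kvm, bool is_tdp_mmu)
{
        if (is_tdp_mmu)
                read_lock(&kvm->mmu_lock);	/* TDP MMU faults may run in parallel */
        else
                write_lock(&kvm->mmu_lock);	/* shadow MMU needs exclusivity */
}

static void mmu_unlock_for_fault_sketch(struct kvm *kvm, bool is_tdp_mmu)
{
        if (is_tdp_mmu)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);
}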
3876 write_lock(&vcpu->kvm->mmu_lock);
3908 write_unlock(&vcpu->kvm->mmu_lock);
3926 * kvm_get_mmu_page_hash(). Note, mmu_lock must be held for write to
3928 * an empty list for their current mmu_lock critical section.
4017 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
4034 write_lock(&vcpu->kvm->mmu_lock);
4112 write_unlock(&vcpu->kvm->mmu_lock);
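The comment at 4017 is an instance of a broader rule: mmu_lock is a non-sleeping rwlock, so anything that might sleep (here, reading the guest's PDPTRs, which can touch guest memory) has to happen before write_lock(). A hedged sketch of that ordering; the PDPTR read and PAE-root install helpers are placeholders, not KVM functions.

/* Placeholders, assumed to be provided elsewhere. */
u64 sketch_read_guest_pdptr(struct kvm_vcpu *vcpu, int index);	/* may sleep */
void sketch_install_pae_root(struct kvm_vcpu *vcpu, int index, u64 pdptr);

static void load_pdptrs_then_lock_sketch(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u64 pdptrs[4];
        int i;

        /* Possibly-sleeping work first, with no lock held. */
        for (i = 0; i < 4; i++)
                pdptrs[i] = sketch_read_guest_pdptr(vcpu, i);

        write_lock(&kvm->mmu_lock);
        for (i = 0; i < 4; i++)
                sketch_install_pae_root(vcpu, i, pdptrs[i]);
        write_unlock(&kvm->mmu_lock);
}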
4245 write_lock(&vcpu->kvm->mmu_lock);
4247 write_unlock(&vcpu->kvm->mmu_lock);
4251 write_lock(&vcpu->kvm->mmu_lock);
4262 write_unlock(&vcpu->kvm->mmu_lock);
4684 * the pfn from the primary MMU, and before acquiring mmu_lock.
4686 * For mmu_lock, if there is an in-progress invalidation and the kernel
4687 * allows preemption, the invalidation task may drop mmu_lock and yield
4688 * in response to mmu_lock being contended, which is *very* counter-
4699 * will never yield mmu_lock in response to contention, as this vCPU is
4700 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4718 * avoid contending mmu_lock. Most invalidations will be detected by
4721 * mmu_lock is acquired.
4757 * now that mmu_lock is held, as the "unsafe" checks performed without
4758 * holding mmu_lock can get false negatives.
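The long comment at 4684-4758 adds a refinement to the retry scheme sketched earlier: before acquiring mmu_lock at all, the fault path peeks at the invalidation state so a fault that is guaranteed to be retried can bail (or wait) without contending the lock and forcing the invalidation task to yield; because that peek can return false negatives, the authoritative mmu_invalidate_retry() check is repeated under mmu_lock. A hedged sketch of the two-step check; the lockless helper is a placeholder, not KVM's actual "unsafe" check.

/* Placeholder: best-effort, lockless "is an invalidation hitting this gfn?". */
bool sketch_invalidation_in_progress(struct kvm *kvm, gfn_t gfn);

static int faultin_with_precheck_sketch(struct kvm_vcpu *vcpu, gfn_t gfn,
                                        unsigned long mmu_seq)
{
        struct kvm *kvm = vcpu->kvm;

        /* Purely an optimization: avoid taking (and contending) mmu_lock. */
        if (sketch_invalidation_in_progress(kvm, gfn))
                return RET_PF_RETRY;

        write_lock(&kvm->mmu_lock);

        /* Authoritative recheck; the lockless peek can miss invalidations. */
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                write_unlock(&kvm->mmu_lock);
                return RET_PF_RETRY;
        }

        /* ... install the SPTE as in the earlier sketch ... */
        write_unlock(&kvm->mmu_lock);
        return RET_PF_FIXED;
}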
4788 write_lock(&vcpu->kvm->mmu_lock);
4801 write_unlock(&vcpu->kvm->mmu_lock);
4879 read_lock(&vcpu->kvm->mmu_lock);
4888 read_unlock(&vcpu->kvm->mmu_lock);
6161 write_lock(&vcpu->kvm->mmu_lock);
6190 write_unlock(&vcpu->kvm->mmu_lock);
6400 write_lock(&vcpu->kvm->mmu_lock);
6416 write_unlock(&vcpu->kvm->mmu_lock);
6648 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6686 write_lock(&kvm->mmu_lock);
6700 * invalidating TDP MMU roots must be done while holding mmu_lock for
6702 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6717 * Note: we need to do this under the protection of mmu_lock,
6724 write_unlock(&kvm->mmu_lock);
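The block at 6686-6724 is about ordering within a single write-side critical section: the new generation and the invalidated roots must be published while mmu_lock is still held for write, before any later step (the obsolete-page zap) gets a chance to drop the lock and yield. A hedged sketch of that ordering; the generation toggle and the two helpers are simplifications of what the file actually does.

/* Placeholders, assumed to be provided elsewhere. */
void sketch_invalidate_tdp_mmu_roots(struct kvm *kvm);
void sketch_zap_obsolete_pages(struct kvm *kvm);	/* may drop mmu_lock and yield */

static void fast_zap_all_sketch(struct kvm *kvm)
{
        write_lock(&kvm->mmu_lock);

        /*
         * Publish everything future page faults must observe while the
         * lock is still held exclusively: flip the (assumed 0/1) valid
         * generation and mark existing roots invalid.
         */
        kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
        sketch_invalidate_tdp_mmu_roots(kvm);

        /* Only now is it safe for the zap below to drop the lock and yield. */
        sketch_zap_obsolete_pages(kvm);

        write_unlock(&kvm->mmu_lock);
}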
6823 write_lock(&kvm->mmu_lock);
6839 write_unlock(&kvm->mmu_lock);
6854 write_lock(&kvm->mmu_lock);
6857 write_unlock(&kvm->mmu_lock);
6861 read_lock(&kvm->mmu_lock);
6863 read_unlock(&kvm->mmu_lock);
6874 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6899 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
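6899 is the allocation-side version of the same "no sleeping under mmu_lock" rule: the split caches are filled with enough objects up front so the loop that holds the lock never has to drop it just to allocate. A hedged sketch using the generic memory-cache helpers; the minimum count is made up (and must stay within the cache's capacity) and the split work itself is elided.

#define SKETCH_SPLIT_CACHE_MIN	10	/* illustrative only */

static int topup_then_split_sketch(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache)
{
        int r;

        /* GFP_KERNEL allocations can sleep, so fill the cache lock-free. */
        r = kvm_mmu_topup_memory_cache(cache, SKETCH_SPLIT_CACHE_MIN);
        if (r)
                return r;

        write_lock(&kvm->mmu_lock);
        /*
         * Consume only pre-allocated objects here, e.g. via
         * kvm_mmu_memory_cache_alloc(cache), which does not sleep.
         */
        write_unlock(&kvm->mmu_lock);

        return 0;
}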
7013 write_unlock(&kvm->mmu_lock);
7021 write_lock(&kvm->mmu_lock);
7096 /* Must be called with the mmu_lock held in write-mode. */
7127 write_lock(&kvm->mmu_lock);
7129 write_unlock(&kvm->mmu_lock);
7132 read_lock(&kvm->mmu_lock);
7134 read_unlock(&kvm->mmu_lock);
7200 write_lock(&kvm->mmu_lock);
7202 write_unlock(&kvm->mmu_lock);
7206 read_lock(&kvm->mmu_lock);
7208 read_unlock(&kvm->mmu_lock);
7216 write_lock(&kvm->mmu_lock);
7222 write_unlock(&kvm->mmu_lock);
7226 read_lock(&kvm->mmu_lock);
7228 read_unlock(&kvm->mmu_lock);
7247 write_lock(&kvm->mmu_lock);
7254 if (cond_resched_rwlock_write(&kvm->mmu_lock))
7263 write_unlock(&kvm->mmu_lock);
7294 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7297 cond_resched_rwlock_write(&kvm->mmu_lock);
7317 write_lock(&kvm->mmu_lock);
7320 write_unlock(&kvm->mmu_lock);
7513 read_lock(&vcpu->kvm->mmu_lock);
7516 read_unlock(&vcpu->kvm->mmu_lock);
7597 write_lock(&kvm->mmu_lock);
7662 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7666 cond_resched_rwlock_write(&kvm->mmu_lock);
7676 write_unlock(&kvm->mmu_lock);
7850 lockdep_assert_held_write(&kvm->mmu_lock);