Lines matching references to mmu_lock

26 lockdep_assert_held_read(&kvm->mmu_lock);
28 lockdep_assert_held_write(&kvm->mmu_lock);
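The two assertions above (source lines 26 and 28) encode a locking contract: each helper declares up front whether it expects mmu_lock held for read or for write, and lockdep flags any caller that violates that. A minimal sketch of the pattern, with hypothetical helper names that are not in the file:

	/* Sketch only; helper names are illustrative, not from the source. */
	static void walk_shared_sketch(struct kvm *kvm)
	{
		/* Fires a lockdep warning unless mmu_lock is held for read. */
		lockdep_assert_held_read(&kvm->mmu_lock);
	}

	static void walk_exclusive_sketch(struct kvm *kvm)
	{
		/* Caller must hold mmu_lock for write. */
		lockdep_assert_held_write(&kvm->mmu_lock);
	}
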
129 * role.invalid are protected by mmu_lock.
131 lockdep_assert_held(&kvm->mmu_lock);
171 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
181 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
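Source lines 171 and 181 are the condition clause of root-iteration macros: a GNU C statement expression runs the lockdep assertion on every loop test, then the comma operator yields _root as the actual loop condition. A simplified sketch of that shape, where first_root() and next_root() are hypothetical stand-ins for the real list walking:

	#define for_each_root_sketch(_kvm, _root)				\
		for (_root = first_root(_kvm);					\
		     /* assert on every test, then evaluate _root */		\
		     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
		     _root = next_root(_kvm, _root))
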
185 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
186 * the implication being that any flow that holds mmu_lock for read is
188 * Holding mmu_lock for write obviates the need for RCU protection as the list
202 * used without holding the mmu_lock at all, any bits that are flipped must
270 read_lock(&kvm->mmu_lock);
285 * root has been invalidated, which requires holding mmu_lock for write.
309 read_unlock(&kvm->mmu_lock);
313 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
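Source lines 270-313 outline the vCPU-root acquisition path: take mmu_lock for read, pin the root with a reference, then drop the lock; the comment at line 313 notes the reference is what keeps the root alive afterwards. A hedged sketch of that shape (find_valid_root() is an illustrative lookup, not the file's real helper, and the refcount field name is assumed):

	static struct kvm_mmu_page *get_root_sketch(struct kvm *kvm)
	{
		struct kvm_mmu_page *root;

		read_lock(&kvm->mmu_lock);
		root = find_valid_root(kvm);		/* hypothetical lookup */
		if (root && !refcount_inc_not_zero(&root->tdp_mmu_root_count))
			root = NULL;			/* lost the race to a zap */
		read_unlock(&kvm->mmu_lock);

		/* Safe without the lock: the reference pins the root. */
		return root;
	}
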
379 lockdep_assert_held_write(&kvm->mmu_lock);
438 * mmu_lock ensures the SPTE can't be made present.
450 * modified by a different vCPU outside of mmu_lock.
459 * task can zap/remove the SPTE as mmu_lock is held for
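Lines 438-459 describe why SPTEs touched under the read lock can change concurrently: another vCPU, or a task holding mmu_lock for write, may zap or modify them at any time. The usual consequence is cmpxchg-based updates; a sketch mirroring the kernel's try_cmpxchg64 usage (the function name here is made up):

	static int set_spte_atomic_sketch(u64 *sptep, u64 old_spte, u64 new_spte)
	{
		/* Fails, and the caller must retry, if another task changed
		 * the SPTE since old_spte was read. */
		if (!try_cmpxchg64(sptep, &old_spte, new_spte))
			return -EBUSY;
		return 0;
	}
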
527 lockdep_assert_held(&kvm->mmu_lock);
686 * and does not hold the mmu_lock. On failure, i.e. if a
722 lockdep_assert_held_read(&kvm->mmu_lock);
750 lockdep_assert_held_write(&kvm->mmu_lock);
799 if (!need_resched() && !rwlock_needbreak(&kvm->mmu_lock))
835 cond_resched_rwlock_read(&kvm->mmu_lock);
837 cond_resched_rwlock_write(&kvm->mmu_lock);
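Lines 799-837 are the iterator's yield point: skip the yield entirely when nothing wants the CPU or the lock, otherwise reschedule through the variant matching how mmu_lock is held. A sketch of that flow (function name hypothetical; the real iterator also restarts its walk after yielding):

	static bool yield_sketch(struct kvm *kvm, bool shared)
	{
		if (!need_resched() && !rwlock_needbreak(&kvm->mmu_lock))
			return false;		/* no contention, keep walking */

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		return true;			/* lock was dropped and retaken */
	}
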
888 * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
890 * callback. Dropping mmu_lock with an unreachable root would result
904 * preempt models) or mmu_lock contention (full or real-time models).
963 lockdep_assert_held_write(&kvm->mmu_lock);
1006 lockdep_assert_held_write(&kvm->mmu_lock);
1024 * and mmu_lock is already held, which means the other thread has yielded.
1030 lockdep_assert_held_write(&kvm->mmu_lock);
1045 read_lock(&kvm->mmu_lock);
1047 write_lock(&kvm->mmu_lock);
1076 read_unlock(&kvm->mmu_lock);
1078 write_unlock(&kvm->mmu_lock);
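Lines 1045-1078 show one function serving both lock modes: the caller's shared flag selects read or write acquisition, with the matching unlock on exit. Sketched:

	static void locked_op_sketch(struct kvm *kvm, bool shared)
	{
		if (shared)
			read_lock(&kvm->mmu_lock);
		else
			write_lock(&kvm->mmu_lock);

		/* ... do the work; shared callers must use atomic SPTE updates ... */

		if (shared)
			read_unlock(&kvm->mmu_lock);
		else
			write_unlock(&kvm->mmu_lock);
	}
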
1084 * zapping is done separately so that it happens with mmu_lock with read,
1085 * whereas invalidating roots must be done with mmu_lock held for write (unless
1104 * mmu_lock must be held for write to ensure that a root doesn't become
1116 lockdep_assert_held_write(&kvm->mmu_lock);
1119 * As above, mmu_lock isn't held when destroying the VM! There can't
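Lines 1084-1119 split invalidation from zapping: flipping role.invalid on every root requires the write lock, while the heavyweight zap can then proceed under the read lock. A loose sketch of the invalidation half (the list and field names follow the x86 layout, but treat this as illustrative):

	static void invalidate_roots_sketch(struct kvm *kvm)
	{
		struct kvm_mmu_page *root;

		lockdep_assert_held_write(&kvm->mmu_lock);

		list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
			root->role.invalid = true;	/* zapped later, under read lock */
	}
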
1467 lockdep_assert_held_read(&kvm->mmu_lock);
1565 read_unlock(&kvm->mmu_lock);
1567 write_unlock(&kvm->mmu_lock);
1572 read_lock(&kvm->mmu_lock);
1574 write_lock(&kvm->mmu_lock);
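Lines 1565-1574 form an unlock/relock sandwich: drop mmu_lock in whichever mode it was taken, perform a sleepable allocation, then retake the lock before returning so the caller can revalidate and retry. Sketch (struct and names simplified):

	static struct kvm_mmu_page *alloc_sp_sketch(struct kvm *kvm, bool shared)
	{
		struct kvm_mmu_page *sp;

		if (shared)
			read_unlock(&kvm->mmu_lock);
		else
			write_unlock(&kvm->mmu_lock);

		sp = kzalloc(sizeof(*sp), GFP_KERNEL_ACCOUNT);	/* may sleep */

		if (shared)
			read_lock(&kvm->mmu_lock);
		else
			write_lock(&kvm->mmu_lock);

		return sp;	/* caller revalidates: the world may have changed */
	}
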
1682 lockdep_assert_held_read(&kvm->mmu_lock);
1695 lockdep_assert_held_write(&kvm->mmu_lock);
1847 lockdep_assert_held_read(&kvm->mmu_lock);
1900 lockdep_assert_held_write(&kvm->mmu_lock);
1946 lockdep_assert_held(&kvm->mmu_lock);
1990 * outside of mmu_lock.