/linux/virt/kvm/
    kvm_mm.h
          8  * for the mmu_lock. These macros, for use in common code
         14  #define KVM_MMU_LOCK_INIT(kvm)  rwlock_init(&(kvm)->mmu_lock)
         15  #define KVM_MMU_LOCK(kvm)       write_lock(&(kvm)->mmu_lock)
         16  #define KVM_MMU_UNLOCK(kvm)     write_unlock(&(kvm)->mmu_lock)
         18  #define KVM_MMU_LOCK_INIT(kvm)  spin_lock_init(&(kvm)->mmu_lock)
         19  #define KVM_MMU_LOCK(kvm)       spin_lock(&(kvm)->mmu_lock)
         20  #define KVM_MMU_UNLOCK(kvm)     spin_unlock(&(kvm)->mmu_lock)
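The two groups of definitions above are the rwlock and spinlock flavours of the same wrappers; the #ifdef in kvm_mm.h picks one set depending on whether the architecture uses an rwlock for mmu_lock. A minimal, hypothetical sketch (not from the tree; the example_ name is invented) of how arch-neutral code can use the wrappers without knowing which primitive it got:

    #include <linux/kvm_host.h>
    #include "kvm_mm.h"

    /*
     * KVM_MMU_LOCK()/KVM_MMU_UNLOCK() expand to write_lock()/write_unlock()
     * when mmu_lock is an rwlock and to spin_lock()/spin_unlock() otherwise,
     * so this helper is oblivious to the architecture's choice.
     */
    static void example_update_generic_mmu_state(struct kvm *kvm)
    {
            KVM_MMU_LOCK(kvm);
            /* ... touch state documented as protected by mmu_lock ... */
            KVM_MMU_UNLOCK(kvm);
    }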
/linux/arch/x86/kvm/mmu/
    page_track.c
         93  lockdep_assert_held_write(&kvm->mmu_lock);  in __kvm_write_track_add_gfn()
        116  lockdep_assert_held_write(&kvm->mmu_lock);  in __kvm_write_track_remove_gfn()
        239  write_lock(&kvm->mmu_lock);  in kvm_page_track_register_notifier()
        241  write_unlock(&kvm->mmu_lock);  in kvm_page_track_register_notifier()
        257  write_lock(&kvm->mmu_lock);  in kvm_page_track_unregister_notifier()
        259  write_unlock(&kvm->mmu_lock);  in kvm_page_track_unregister_notifier()
        335  write_lock(&kvm->mmu_lock);  in kvm_write_track_add_gfn()
        337  write_unlock(&kvm->mmu_lock);  in kvm_write_track_add_gfn()
        365  write_lock(&kvm->mmu_lock);  in kvm_write_track_remove_gfn()
        367  write_unlock(&kvm->mmu_lock);  in kvm_write_track_remove_gfn()
        [all...]
    tdp_mmu.c
         26  lockdep_assert_held_read(&kvm->mmu_lock);  in kvm_lockdep_assert_mmu_lock_held()
         28  lockdep_assert_held_write(&kvm->mmu_lock);  in kvm_lockdep_assert_mmu_lock_held()
        129  * role.invalid are protected by mmu_lock.  in tdp_mmu_next_root()
        131  lockdep_assert_held(&kvm->mmu_lock);  in tdp_mmu_next_root()
        171  ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
        181  ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
        185  * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
        186  * the implication being that any flow that holds mmu_lock for read is
        188  * Holding mmu_lock for write obviates the need for RCU protection as the list
        202  * used without holding the mmu_lock a
        [all...]
    mmu.c
        760  * an emulated write will see the elevated count and acquire mmu_lock  in account_shadowed()
        863  * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
        864  * operates with mmu_lock held for write), but rmaps can be walked without
        865  * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
        869  * done while holding mmu_lock for write. This allows a task walking rmaps
        870  * without holding mmu_lock to concurrently walk the same entries as a task
        871  * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
        890  * do so while holding mmu_lock for write, and are mutually exclusive.  in __kvm_rmap_lock()
        938  lockdep_assert_held_write(&kvm->mmu_lock);  in kvm_rmap_lock()
        960  lockdep_assert_held_write(&kvm->mmu_lock);  in kvm_rmap_unlock()
        [all...]
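The page_track.c matches above show a recurring x86 MMU convention: the exported entry point takes kvm->mmu_lock for write, while the double-underscore worker only asserts the lock via lockdep. A rough sketch of that split, with invented example_ names:

    #include <linux/kvm_host.h>
    #include <linux/lockdep.h>

    /* Worker: callers must already hold mmu_lock for write. */
    static void __example_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
    {
            lockdep_assert_held_write(&kvm->mmu_lock);
            /* ... update the write-tracking metadata for @gfn ... */
    }

    /* Entry point: wraps the worker in the lock. */
    void example_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
    {
            write_lock(&kvm->mmu_lock);
            __example_write_track_add_gfn(kvm, gfn);
            write_unlock(&kvm->mmu_lock);
    }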
/linux/arch/powerpc/kvm/
    book3s_hv_nested.c
        766  * so we don't need to hold kvm->mmu_lock.  in kvmhv_release_nested()
        783  spin_lock(&kvm->mmu_lock);  in kvmhv_remove_nested()
        789  spin_unlock(&kvm->mmu_lock);  in kvmhv_remove_nested()
        808  spin_lock(&kvm->mmu_lock);  in kvmhv_release_all_nested()
        818  spin_unlock(&kvm->mmu_lock);  in kvmhv_release_all_nested()
        835  spin_lock(&kvm->mmu_lock);  in kvmhv_flush_nested()
        837  spin_unlock(&kvm->mmu_lock);  in kvmhv_flush_nested()
        852  spin_lock(&kvm->mmu_lock);  in kvmhv_get_nested()
        856  spin_unlock(&kvm->mmu_lock);  in kvmhv_get_nested()
        870  spin_lock(&kvm->mmu_lock);  in kvmhv_get_nested()
        [all...]
    book3s_mmu_hpte.c
         63  spin_lock(&vcpu3s->mmu_lock);  in kvmppc_mmu_hpte_cache_map()
         92  spin_unlock(&vcpu3s->mmu_lock);  in kvmppc_mmu_hpte_cache_map()
        104  spin_lock(&vcpu3s->mmu_lock);  in invalidate_pte()
        108  spin_unlock(&vcpu3s->mmu_lock);  in invalidate_pte()
        121  spin_unlock(&vcpu3s->mmu_lock);  in invalidate_pte()
        363  spin_lock_init(&vcpu3s->mmu_lock);  in kvmppc_mmu_hpte_init()
    book3s_64_mmu_radix.c
        422  /* Called with kvm->mmu_lock held */
        647  spin_lock(&kvm->mmu_lock);  in kvmppc_create_pte()
        783  spin_unlock(&kvm->mmu_lock);  in kvmppc_create_pte()
        852  spin_lock(&kvm->mmu_lock);  in kvmppc_book3s_instantiate_page()
        857  spin_unlock(&kvm->mmu_lock);  in kvmppc_book3s_instantiate_page()
        990  spin_lock(&kvm->mmu_lock);  in kvmppc_book3s_radix_page_fault()
        994  spin_unlock(&kvm->mmu_lock);  in kvmppc_book3s_radix_page_fault()
       1010  /* Called with kvm->mmu_lock held */
       1029  /* Called with kvm->mmu_lock held */
       1057  /* Called with kvm->mmu_lock hel
        [all...]
    book3s_hv_rm_mmu.c
        248  arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_enter()
        263  arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_enter()
        277  arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_enter()
        938  arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_page_init_zero()
        950  arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_page_init_zero()
        966  arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_page_init_copy()
        981  arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);  in kvmppc_do_h_page_init_copy()
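The book3s_hv_rm_mmu.c matches above are the real-mode side of the same lock: those handlers bypass the ordinary spin_lock() wrappers and operate on the raw arch_spinlock_t embedded in the spinlock_t. A loose sketch of that pattern (example_ name invented; assumes the usual non-PREEMPT_RT spinlock layout, as the matched code does):

    #include <linux/kvm_host.h>
    #include <linux/spinlock.h>

    static long example_rm_hpte_update(struct kvm *kvm)
    {
            /*
             * Mirror kvmppc_do_h_enter(): take the underlying arch lock
             * directly instead of going through spin_lock(&kvm->mmu_lock).
             */
            arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
            /* ... update the hashed page table entry ... */
            arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
            return 0;
    }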
    book3s_64_mmu_host.c
        151  spin_lock(&kvm->mmu_lock);  in kvmppc_mmu_map_page()
        206  spin_unlock(&kvm->mmu_lock);  in kvmppc_mmu_map_page()
    book3s_64_mmu_hv.c
        615  spin_lock(&kvm->mmu_lock);  in kvmppc_book3s_hv_page_fault()
        620  spin_unlock(&kvm->mmu_lock);  in kvmppc_book3s_hv_page_fault()
        749  spin_lock(&kvm->mmu_lock);  in kvmppc_rmap_reset()
        756  spin_unlock(&kvm->mmu_lock);  in kvmppc_rmap_reset()
       1377  spin_lock(&kvm->mmu_lock);  in resize_hpt_pivot()
       1384  spin_unlock(&kvm->mmu_lock);  in resize_hpt_pivot()
    e500_mmu_host.c
        366  spin_lock(&kvm->mmu_lock);  in kvmppc_e500_shadow_map()
        377  * We are holding kvm->mmu_lock so a notifier invalidate  in kvmppc_e500_shadow_map()
        468  spin_unlock(&kvm->mmu_lock);  in kvmppc_e500_shadow_map()
/linux/arch/arm64/kvm/
    mmu.c
         81  cond_resched_rwlock_write(&kvm->mmu_lock);  in stage2_apply_range()
        110  if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))  in need_split_memcache_topup_or_resched()
        127  lockdep_assert_held_write(&kvm->mmu_lock);  in kvm_mmu_split_huge_pages()
        139  write_unlock(&kvm->mmu_lock);  in kvm_mmu_split_huge_pages()
        145  write_lock(&kvm->mmu_lock);  in kvm_mmu_split_huge_pages()
        323  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
        333  lockdep_assert_held_write(&kvm->mmu_lock);  in __unmap_stage2_range()
        373  write_lock(&kvm->mmu_lock);  in stage2_flush_vm()
        381  write_unlock(&kvm->mmu_lock);  in stage2_flush_vm()
       1051  write_lock(&kvm->mmu_lock);  in stage2_unmap_vm()
        [all...]
    nested.c
         33  * mmu_lock held.
        473  lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);  in get_guest_mapping_ttl()
        597  write_lock(&kvm->mmu_lock);  in kvm_s2_mmu_iterate_by_vmid()
        609  write_unlock(&kvm->mmu_lock);  in kvm_s2_mmu_iterate_by_vmid()
        618  lockdep_assert_held_write(&kvm->mmu_lock);  in lookup_s2_mmu()
        667  lockdep_assert_held_write(&vcpu->kvm->mmu_lock);  in get_s2_mmu_nested()
        741  scoped_guard(write_lock, &vcpu->kvm->mmu_lock)  in kvm_vcpu_load_hw_mmu()
        825  lockdep_assert_held_write(&kvm->mmu_lock);  in kvm_invalidate_vncr_ipa()
        879  lockdep_assert_held_write(&kvm->mmu_lock);  in invalidate_vncr_va()
       1050  guard(write_lock)(&vcpu->kvm->mmu_lock);  in kvm_handle_s1e2_tlbi()
        [all...]
    pkvm.c
        338  lockdep_assert_held_write(&kvm->mmu_lock);  in pkvm_pgtable_stage2_map()
        372  lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock);  in pkvm_pgtable_stage2_unmap()
        384  lockdep_assert_held(&kvm->mmu_lock);  in pkvm_pgtable_stage2_wrprotect()
        400  lockdep_assert_held(&kvm->mmu_lock);  in pkvm_pgtable_stage2_flush()
        415  lockdep_assert_held(&kvm->mmu_lock);  in pkvm_pgtable_stage2_test_clear_young()
    ptdump.c
        144  write_lock(&kvm->mmu_lock);  in kvm_ptdump_guest_show()
        146  write_unlock(&kvm->mmu_lock);  in kvm_ptdump_guest_show()
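Several of the arm64 mmu.c matches (stage2_apply_range(), need_split_memcache_topup_or_resched()) revolve around breaking up long stage-2 walks that hold mmu_lock for write. A simplified sketch of that lock-break idiom, with an invented example_ name and an arbitrary 2MiB step:

    #include <linux/kvm_host.h>
    #include <linux/sched.h>
    #include <linux/sizes.h>

    static void example_stage2_apply_range(struct kvm *kvm, u64 start, u64 end)
    {
            u64 addr;

            lockdep_assert_held_write(&kvm->mmu_lock);

            for (addr = start; addr < end; addr += SZ_2M) {
                    /* ... apply the operation to one block of stage-2 tables ... */

                    /* Yield mmu_lock if someone is waiting on it or we must reschedule. */
                    if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
                            cond_resched_rwlock_write(&kvm->mmu_lock);
            }
    }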
/linux/arch/arm64/include/asm/
    kvm_mmu.h
        361  write_lock(&kvm->mmu_lock);  in kvm_fault_lock()
        363  read_lock(&kvm->mmu_lock);  in kvm_fault_lock()
        369  write_unlock(&kvm->mmu_lock);  in kvm_fault_unlock()
        371  read_unlock(&kvm->mmu_lock);  in kvm_fault_unlock()
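kvm_fault_lock()/kvm_fault_unlock() above show that arm64 stage-2 fault handling may run under either side of the rwlock. A minimal sketch of such a wrapper; the predicate example_fault_needs_write_lock() is a placeholder, not the condition the real helpers test:

    #include <linux/kvm_host.h>

    /* Hypothetical predicate; the real condition lives in the arm64 headers. */
    static bool example_fault_needs_write_lock(struct kvm *kvm)
    {
            return false;   /* placeholder: the real helpers test a VM property */
    }

    static void example_fault_lock(struct kvm *kvm)
    {
            if (example_fault_needs_write_lock(kvm))
                    write_lock(&kvm->mmu_lock);
            else
                    read_lock(&kvm->mmu_lock);
    }

    static void example_fault_unlock(struct kvm *kvm)
    {
            if (example_fault_needs_write_lock(kvm))
                    write_unlock(&kvm->mmu_lock);
            else
                    read_unlock(&kvm->mmu_lock);
    }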
/linux/arch/mips/kvm/
    mmu.c
        265  * The caller must hold the @kvm->mmu_lock spinlock.
        390  * The caller must hold the @kvm->mmu_lock spinlock.  in BUILD_PTE_RANGE_OP()
        412  * acquire @kvm->mmu_lock.
        489  spin_lock(&kvm->mmu_lock);  in _kvm_mips_map_page_fast()
        519  spin_unlock(&kvm->mmu_lock);  in _kvm_mips_map_page_fast()
        584  * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.  in kvm_mips_map_page()
        600  spin_lock(&kvm->mmu_lock);  in kvm_mips_map_page()
        608  spin_unlock(&kvm->mmu_lock);  in kvm_mips_map_page()
        637  spin_unlock(&kvm->mmu_lock);  in kvm_mips_map_page()
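The kvm_mips_map_page() matches reference the generic notifier-retry protocol: snapshot mmu_invalidate_seq before resolving the pfn (which may sleep), then recheck it under kvm->mmu_lock so a concurrent invalidation forces a retry rather than installing a stale mapping. A condensed sketch of that protocol; example_resolve_pfn() is a hypothetical stand-in for the unlocked pfn lookup:

    #include <linux/kvm_host.h>

    /* Hypothetical helper: resolves a gfn to a host pfn, may sleep/fault. */
    kvm_pfn_t example_resolve_pfn(struct kvm *kvm, gfn_t gfn);

    static int example_map_page(struct kvm *kvm, gfn_t gfn)
    {
            unsigned long mmu_seq;
            kvm_pfn_t pfn;

    retry:
            /* Snapshot the invalidation sequence before the unlocked lookup. */
            mmu_seq = kvm->mmu_invalidate_seq;
            smp_rmb();

            pfn = example_resolve_pfn(kvm, gfn);

            spin_lock(&kvm->mmu_lock);
            if (mmu_invalidate_retry(kvm, mmu_seq)) {
                    /* An invalidation raced with us; @pfn may be stale. */
                    spin_unlock(&kvm->mmu_lock);
                    goto retry;
            }
            /* ... install the gfn -> pfn translation in the GPA page tables ... */
            spin_unlock(&kvm->mmu_lock);
            return 0;
    }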
    mips.c
        196  spin_lock(&kvm->mmu_lock);  in kvm_arch_flush_shadow_memslot()
        201  spin_unlock(&kvm->mmu_lock);  in kvm_arch_flush_shadow_memslot()
        231  spin_lock(&kvm->mmu_lock);  in kvm_arch_commit_memory_region()
        237  spin_unlock(&kvm->mmu_lock);  in kvm_arch_commit_memory_region()
/linux/drivers/accel/habanalabs/common/
    command_buffer.c
         41  mutex_lock(&hdev->mmu_lock);  in cb_map_mem()
         53  mutex_unlock(&hdev->mmu_lock);  in cb_map_mem()
         62  mutex_unlock(&hdev->mmu_lock);  in cb_map_mem()
         72  mutex_lock(&hdev->mmu_lock);  in cb_unmap_mem()
         75  mutex_unlock(&hdev->mmu_lock);  in cb_unmap_mem()
    memory.c
       1185  mutex_lock(&hdev->mmu_lock);  in map_device_va()
       1191  mutex_unlock(&hdev->mmu_lock);  in map_device_va()
       1197  mutex_unlock(&hdev->mmu_lock);  in map_device_va()
       1352  mutex_lock(&hdev->mmu_lock);  in unmap_device_va()
       1365  mutex_unlock(&hdev->mmu_lock);  in unmap_device_va()
       2775  mutex_lock(&hdev->mmu_lock);  in hl_vm_ctx_fini()
       2781  mutex_unlock(&hdev->mmu_lock);  in hl_vm_ctx_fini()
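In the habanalabs driver, mmu_lock is a per-device mutex rather than the KVM lock: the map/unmap paths above take hdev->mmu_lock around device page-table updates. A bare-bones sketch of that shape (the struct and function names here are invented; only the lock's role comes from the matches):

    #include <linux/mutex.h>

    /* Minimal stand-in for the driver's device structure. */
    struct example_hl_device {
            struct mutex mmu_lock;          /* serializes device-MMU page-table updates */
    };

    static int example_map_device_va(struct example_hl_device *hdev)
    {
            int rc = 0;

            mutex_lock(&hdev->mmu_lock);
            /* ... program the device MMU mapping and flush its TLBs ... */
            mutex_unlock(&hdev->mmu_lock);

            return rc;
    }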
/linux/drivers/gpu/drm/msm/
    msm_gem_vma.c
        191  mutex_lock(&vm->mmu_lock);  in msm_gem_vm_unusable()
        221  mutex_unlock(&vm->mmu_lock);  in msm_gem_vm_unusable()
        230  lockdep_assert_held(&vm->mmu_lock);  in vm_log()
        278  * The mmu_lock is only needed when preallocation is used. But  in msm_gem_vma_unmap()
        283  mutex_lock(&vm->mmu_lock);  in msm_gem_vma_unmap()
        292  mutex_unlock(&vm->mmu_lock);  in msm_gem_vma_unmap()
        314  * The mmu_lock is only needed when preallocation is used. But  in msm_gem_vma_map()
        319  mutex_lock(&vm->mmu_lock);  in msm_gem_vma_map()
        339  mutex_unlock(&vm->mmu_lock);  in msm_gem_vma_map()
        653  mutex_lock(&vm->mmu_lock);  in msm_vma_job_run()
        [all...]
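msm_gem_vma.c uses yet another mmu_lock, a per-VM mutex guarding GPU page-table updates; the matched comments note it is only strictly needed when pagetable memory was preallocated. A rough sketch of the shape of those map/unmap paths (struct and function names invented; the sketch simply takes the mutex unconditionally):

    #include <linux/mutex.h>

    /* Stand-in for the driver's VM object; only the lock name comes from the matches. */
    struct example_gem_vm {
            struct mutex mmu_lock;          /* protects IOMMU pagetable updates */
    };

    static void example_gem_vma_map(struct example_gem_vm *vm)
    {
            mutex_lock(&vm->mmu_lock);
            /* ... map the VMA's backing pages into the GPU address space ... */
            mutex_unlock(&vm->mmu_lock);
    }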
/linux/arch/x86/kvm/
    debugfs.c
        112  write_lock(&kvm->mmu_lock);  in kvm_mmu_rmaps_stat_show()
        132  write_unlock(&kvm->mmu_lock);  in kvm_mmu_rmaps_stat_show()
/linux/drivers/accel/habanalabs/common/mmu/
    mmu.c
         51  mutex_init(&hdev->mmu_lock);  in hl_mmu_init()
         95  mutex_destroy(&hdev->mmu_lock);  in hl_mmu_fini()
        573  mutex_lock(&hdev->mmu_lock);  in hl_mmu_get_tlb_info()
        575  mutex_unlock(&hdev->mmu_lock);  in hl_mmu_get_tlb_info()
        681  mutex_lock(&hdev->mmu_lock);  in hl_mmu_prefetch_work_function()
        685  mutex_unlock(&hdev->mmu_lock);  in hl_mmu_prefetch_work_function()
/linux/arch/powerpc/include/asm/
    kvm_book3s_64.h
        654  VM_WARN(!spin_is_locked(&kvm->mmu_lock),  in find_kvm_secondary_pte()
        655  "%s called with kvm mmu_lock not held \n", __func__);  in find_kvm_secondary_pte()
        666  VM_WARN(!spin_is_locked(&kvm->mmu_lock),  in find_kvm_host_pte()
        667  "%s called with kvm mmu_lock not held \n", __func__);  in find_kvm_host_pte()
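find_kvm_secondary_pte()/find_kvm_host_pte() above use a softer lock-held check than lockdep: a VM_WARN() on spin_is_locked(). A small sketch of that assertion style (example_ name invented):

    #include <linux/kvm_host.h>
    #include <linux/mmdebug.h>
    #include <linux/pgtable.h>

    /* Walker that warns, rather than asserting via lockdep, if mmu_lock is not held. */
    static pte_t *example_find_pte(struct kvm *kvm, unsigned long ea)
    {
            VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                    "%s called with kvm mmu_lock not held\n", __func__);

            /* ... walk the partition-scoped table for @ea and return the PTE, or NULL ... */
            return NULL;
    }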
/linux/Documentation/virt/kvm/
    locking.rst
         55  - kvm->arch.mmu_lock is an rwlock; critical sections for
         57    also take kvm->arch.mmu_lock
        268  ``kvm->mmu_lock``