
Searched refs:mmu_lock (Results 1 – 25 of 39) sorted by relevance

/linux/virt/kvm/
kvm_mm.h
8 * for the mmu_lock. These macros, for use in common code
14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
18 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
19 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
20 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
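The kvm_mm.h hits above are the abstraction that lets KVM's common code take mmu_lock without knowing whether the architecture defines it as an rwlock or a spinlock: the KVM_MMU_LOCK* macros expand to the matching primitive at build time. Below is a minimal userspace sketch of the same compile-time switch, using pthreads and illustrative names rather than the kernel API.

/* Sketch only: common code is written once against the macros, and a config
 * switch decides whether mmu_lock is a read-write lock or a plain mutex.
 * HAVE_MMU_RWLOCK and the MMU_LOCK* names are made up for illustration. */
#include <pthread.h>

struct vm {
#ifdef HAVE_MMU_RWLOCK
    pthread_rwlock_t mmu_lock;
#else
    pthread_mutex_t mmu_lock;
#endif
};

#ifdef HAVE_MMU_RWLOCK
#define MMU_LOCK_INIT(vm)  pthread_rwlock_init(&(vm)->mmu_lock, NULL)
#define MMU_LOCK(vm)       pthread_rwlock_wrlock(&(vm)->mmu_lock)
#define MMU_UNLOCK(vm)     pthread_rwlock_unlock(&(vm)->mmu_lock)
#else
#define MMU_LOCK_INIT(vm)  pthread_mutex_init(&(vm)->mmu_lock, NULL)
#define MMU_LOCK(vm)       pthread_mutex_lock(&(vm)->mmu_lock)
#define MMU_UNLOCK(vm)     pthread_mutex_unlock(&(vm)->mmu_lock)
#endif

/* "Common code": identical no matter which lock type was configured. */
int main(void)
{
    struct vm vm;

    MMU_LOCK_INIT(&vm);
    MMU_LOCK(&vm);
    /* ... touch MMU state ... */
    MMU_UNLOCK(&vm);
    return 0;
}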
/linux/arch/x86/kvm/mmu/
page_track.c
93 lockdep_assert_held_write(&kvm->mmu_lock); in __kvm_write_track_add_gfn()
116 lockdep_assert_held_write(&kvm->mmu_lock); in __kvm_write_track_remove_gfn()
239 write_lock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
241 write_unlock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
257 write_lock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
259 write_unlock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
335 write_lock(&kvm->mmu_lock); in kvm_write_track_add_gfn()
337 write_unlock(&kvm->mmu_lock); in kvm_write_track_add_gfn()
365 write_lock(&kvm->mmu_lock); in kvm_write_track_remove_gfn()
367 write_unlock(&kvm->mmu_lock); in kvm_write_track_remove_gfn()
[all...]
tdp_mmu.c
26 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
28 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
129 * role.invalid are protected by mmu_lock. in tdp_mmu_next_root()
131 lockdep_assert_held(&kvm->mmu_lock); in tdp_mmu_next_root()
171 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
181 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
185 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
186 * the implication being that any flow that holds mmu_lock for read is
188 * Holding mmu_lock for write obviates the need for RCU protection as the list
202 * used without holding the mmu_lock a
[all...]
mmu.c
760 * an emulated write will see the elevated count and acquire mmu_lock in account_shadowed()
863 * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
864 * operates with mmu_lock held for write), but rmaps can be walked without
865 * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
869 * done while holding mmu_lock for write. This allows a task walking rmaps
870 * without holding mmu_lock to concurrently walk the same entries as a task
871 * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
890 * do so while holding mmu_lock for write, and are mutually exclusive. in __kvm_rmap_lock()
938 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_rmap_lock()
960 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_rmap_unlock()
[all...]
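The mmu.c comments above (lines 863-871, plus the kvm_rmap_lock()/kvm_rmap_unlock() asserts) describe a two-level scheme: rmaps are normally modified with mmu_lock held for write, but each rmap also has its own lock so a walker that can tolerate concurrent changes may skip mmu_lock entirely. A much-simplified userspace sketch of that shape follows; the per-bucket pthread mutex stands in for the bit lock KVM packs into the rmap head, and every name here is illustrative rather than KVM's.

#include <pthread.h>

struct rmap_bucket {
    pthread_mutex_t lock;      /* stands in for the per-rmap bit lock */
    unsigned long entries[16];
    int nr;
};

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct rmap_bucket buckets[2] = {
    { .lock = PTHREAD_MUTEX_INITIALIZER },
    { .lock = PTHREAD_MUTEX_INITIALIZER },
};

/* Writer path: mmu_lock held for write excludes other writers, the bucket
 * lock excludes walkers that are not holding mmu_lock. */
void rmap_add(int b, unsigned long val)
{
    pthread_rwlock_wrlock(&mmu_lock);
    pthread_mutex_lock(&buckets[b].lock);
    if (buckets[b].nr < 16)
        buckets[b].entries[buckets[b].nr++] = val;
    pthread_mutex_unlock(&buckets[b].lock);
    pthread_rwlock_unlock(&mmu_lock);
}

/* Walk without mmu_lock: only the bucket lock is taken, so this can run
 * concurrently with writers working on other buckets. */
unsigned long rmap_walk(int b)
{
    unsigned long sum = 0;
    int i;

    pthread_mutex_lock(&buckets[b].lock);
    for (i = 0; i < buckets[b].nr; i++)
        sum += buckets[b].entries[i];
    pthread_mutex_unlock(&buckets[b].lock);
    return sum;
}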
/linux/arch/powerpc/kvm/
book3s_hv_nested.c
766 * so we don't need to hold kvm->mmu_lock. in kvmhv_release_nested()
783 spin_lock(&kvm->mmu_lock); in kvmhv_remove_nested()
789 spin_unlock(&kvm->mmu_lock); in kvmhv_remove_nested()
808 spin_lock(&kvm->mmu_lock); in kvmhv_release_all_nested()
818 spin_unlock(&kvm->mmu_lock); in kvmhv_release_all_nested()
835 spin_lock(&kvm->mmu_lock); in kvmhv_flush_nested()
837 spin_unlock(&kvm->mmu_lock); in kvmhv_flush_nested()
852 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
856 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
870 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
[all...]
book3s_mmu_hpte.c
63 spin_lock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
92 spin_unlock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
104 spin_lock(&vcpu3s->mmu_lock); in invalidate_pte()
108 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
121 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
363 spin_lock_init(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_init()
book3s_64_mmu_radix.c
422 /* Called with kvm->mmu_lock held */
647 spin_lock(&kvm->mmu_lock); in kvmppc_create_pte()
783 spin_unlock(&kvm->mmu_lock); in kvmppc_create_pte()
852 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
857 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
990 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
994 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1010 /* Called with kvm->mmu_lock held */
1029 /* Called with kvm->mmu_lock held */
1057 /* Called with kvm->mmu_lock held */
[all...]
book3s_hv_rm_mmu.c
248 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
263 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
277 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
938 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
950 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
966 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
981 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
book3s_64_mmu_host.c
151 spin_lock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
206 spin_unlock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
book3s_64_mmu_hv.c
615 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
620 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
749 spin_lock(&kvm->mmu_lock); in kvmppc_rmap_reset()
756 spin_unlock(&kvm->mmu_lock); in kvmppc_rmap_reset()
1377 spin_lock(&kvm->mmu_lock); in resize_hpt_pivot()
1384 spin_unlock(&kvm->mmu_lock); in resize_hpt_pivot()
e500_mmu_host.c
366 spin_lock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
377 * We are holding kvm->mmu_lock so a notifier invalidate in kvmppc_e500_shadow_map()
468 spin_unlock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
/linux/arch/arm64/kvm/
mmu.c
81 cond_resched_rwlock_write(&kvm->mmu_lock); in stage2_apply_range()
110 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) in need_split_memcache_topup_or_resched()
127 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_mmu_split_huge_pages()
139 write_unlock(&kvm->mmu_lock); in kvm_mmu_split_huge_pages()
145 write_lock(&kvm->mmu_lock); in kvm_mmu_split_huge_pages()
323 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
333 lockdep_assert_held_write(&kvm->mmu_lock); in __unmap_stage2_range()
373 write_lock(&kvm->mmu_lock); in stage2_flush_vm()
381 write_unlock(&kvm->mmu_lock); in stage2_flush_vm()
1051 write_lock(&kvm->mmu_lock); in stage2_unmap_vm()
[all...]
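The kvm_mmu_split_huge_pages() hits above (lines 127-145) show a lock-break pattern: the split runs with mmu_lock held for write, but when the cache of pre-allocated pages is exhausted the lock is dropped, the cache is topped up with a sleeping allocation, and the lock is re-taken before continuing; cond_resched_rwlock_write() at line 81 yields the lock in the same spirit on long walks. A hedged userspace sketch of that pattern, where cache_empty() and refill_cache() are hypothetical helpers rather than kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

struct cache { void *objs[32]; int nr; };

static bool cache_empty(struct cache *c) { return c->nr == 0; }

/* May block (allocates), so it must never run under mmu_lock. */
static void refill_cache(struct cache *c)
{
    while (c->nr < 32)
        c->objs[c->nr++] = malloc(64);
}

void split_range(struct cache *c, int nr_pages)
{
    int i;

    pthread_rwlock_wrlock(&mmu_lock);
    for (i = 0; i < nr_pages; i++) {
        if (cache_empty(c)) {
            /* Drop the lock for the sleeping refill, then re-take it.
             * The real code must cope with state having changed while
             * the lock was not held. */
            pthread_rwlock_unlock(&mmu_lock);
            refill_cache(c);
            pthread_rwlock_wrlock(&mmu_lock);
        }
        c->nr--;               /* consume one pre-allocated page */
        /* ... split one huge mapping ... */
    }
    pthread_rwlock_unlock(&mmu_lock);
}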
nested.c
33 * mmu_lock held.
473 lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock); in get_guest_mapping_ttl()
597 write_lock(&kvm->mmu_lock); in kvm_s2_mmu_iterate_by_vmid()
609 write_unlock(&kvm->mmu_lock); in kvm_s2_mmu_iterate_by_vmid()
618 lockdep_assert_held_write(&kvm->mmu_lock); in lookup_s2_mmu()
667 lockdep_assert_held_write(&vcpu->kvm->mmu_lock); in get_s2_mmu_nested()
741 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) in kvm_vcpu_load_hw_mmu()
825 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_invalidate_vncr_ipa()
879 lockdep_assert_held_write(&kvm->mmu_lock); in invalidate_vncr_va()
1050 guard(write_lock)(&vcpu->kvm->mmu_lock); in kvm_handle_s1e2_tlbi()
[all...]
pkvm.c
338 lockdep_assert_held_write(&kvm->mmu_lock); in pkvm_pgtable_stage2_map()
372 lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock); in pkvm_pgtable_stage2_unmap()
384 lockdep_assert_held(&kvm->mmu_lock); in pkvm_pgtable_stage2_wrprotect()
400 lockdep_assert_held(&kvm->mmu_lock); in pkvm_pgtable_stage2_flush()
415 lockdep_assert_held(&kvm->mmu_lock); in pkvm_pgtable_stage2_test_clear_young()
ptdump.c
144 write_lock(&kvm->mmu_lock); in kvm_ptdump_guest_show()
146 write_unlock(&kvm->mmu_lock); in kvm_ptdump_guest_show()
/linux/arch/arm64/include/asm/
kvm_mmu.h
361 write_lock(&kvm->mmu_lock); in kvm_fault_lock()
363 read_lock(&kvm->mmu_lock); in kvm_fault_lock()
369 write_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
371 read_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
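The kvm_mmu.h hits above show a small fault-path helper pair that takes mmu_lock either for write or for read, with a matching unlock. A tiny userspace analogue of that shape, where the use_shared flag is a hypothetical stand-in for whatever condition the real helper tests:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

void fault_lock(bool use_shared)
{
    if (use_shared)
        pthread_rwlock_rdlock(&mmu_lock);   /* faults may run in parallel */
    else
        pthread_rwlock_wrlock(&mmu_lock);   /* fully serialized fallback */
}

void fault_unlock(void)
{
    pthread_rwlock_unlock(&mmu_lock);       /* same unlock for read or write */
}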
/linux/arch/mips/kvm/
mmu.c
265 * The caller must hold the @kvm->mmu_lock spinlock.
390 * The caller must hold the @kvm->mmu_lock spinlock. in BUILD_PTE_RANGE_OP()
412 * acquire @kvm->mmu_lock.
489 spin_lock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
519 spin_unlock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
584 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing. in kvm_mips_map_page()
600 spin_lock(&kvm->mmu_lock); in kvm_mips_map_page()
608 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
637 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
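The kvm_mips_map_page() snippet at line 584 alludes to KVM's invalidation-retry scheme: the fault path samples an invalidation sequence count before doing work that can sleep (such as pinning the host page), then re-checks the count under mmu_lock and retries if an invalidation slipped in between. A simplified userspace sketch of that check, with illustrative names rather than KVM's mmu_invalidate_* fields:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long invalidate_seq;   /* bumped by the invalidation side */
static int invalidate_in_progress;     /* nonzero while an invalidation runs */

/* Invalidation side: zap under mmu_lock and bump the sequence count. */
void invalidate_range(void)
{
    pthread_mutex_lock(&mmu_lock);
    invalidate_in_progress++;
    /* ... zap affected mappings ... */
    invalidate_seq++;
    invalidate_in_progress--;
    pthread_mutex_unlock(&mmu_lock);
}

/* Fault side: sample the sequence, do sleepable work outside the lock,
 * then re-check under the lock; returns false to ask the caller to retry. */
bool map_page(void)
{
    unsigned long seq;

    pthread_mutex_lock(&mmu_lock);
    seq = invalidate_seq;
    pthread_mutex_unlock(&mmu_lock);

    /* ... pin the host page, possibly sleeping ... */

    pthread_mutex_lock(&mmu_lock);
    if (invalidate_in_progress || seq != invalidate_seq) {
        pthread_mutex_unlock(&mmu_lock);
        return false;                  /* raced with an invalidation */
    }
    /* ... install the mapping ... */
    pthread_mutex_unlock(&mmu_lock);
    return true;
}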
mips.c
196 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
201 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
231 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
237 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
/linux/drivers/accel/habanalabs/common/
command_buffer.c
41 mutex_lock(&hdev->mmu_lock); in cb_map_mem()
53 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
62 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
72 mutex_lock(&hdev->mmu_lock); in cb_unmap_mem()
75 mutex_unlock(&hdev->mmu_lock); in cb_unmap_mem()
memory.c
1185 mutex_lock(&hdev->mmu_lock); in map_device_va()
1191 mutex_unlock(&hdev->mmu_lock); in map_device_va()
1197 mutex_unlock(&hdev->mmu_lock); in map_device_va()
1352 mutex_lock(&hdev->mmu_lock); in unmap_device_va()
1365 mutex_unlock(&hdev->mmu_lock); in unmap_device_va()
2775 mutex_lock(&hdev->mmu_lock); in hl_vm_ctx_fini()
2781 mutex_unlock(&hdev->mmu_lock); in hl_vm_ctx_fini()
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
191 mutex_lock(&vm->mmu_lock); in msm_gem_vm_unusable()
221 mutex_unlock(&vm->mmu_lock); in msm_gem_vm_unusable()
230 lockdep_assert_held(&vm->mmu_lock); in vm_log()
278 * The mmu_lock is only needed when preallocation is used. But in msm_gem_vma_unmap()
283 mutex_lock(&vm->mmu_lock); in msm_gem_vma_unmap()
292 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_unmap()
314 * The mmu_lock is only needed when preallocation is used. But in msm_gem_vma_map()
319 mutex_lock(&vm->mmu_lock); in msm_gem_vma_map()
339 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_map()
653 mutex_lock(&vm->mmu_lock); in msm_vma_job_run()
[all...]
/linux/arch/x86/kvm/
debugfs.c
112 write_lock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
132 write_unlock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
/linux/drivers/accel/habanalabs/common/mmu/
mmu.c
51 mutex_init(&hdev->mmu_lock); in hl_mmu_init()
95 mutex_destroy(&hdev->mmu_lock); in hl_mmu_fini()
573 mutex_lock(&hdev->mmu_lock); in hl_mmu_get_tlb_info()
575 mutex_unlock(&hdev->mmu_lock); in hl_mmu_get_tlb_info()
681 mutex_lock(&hdev->mmu_lock); in hl_mmu_prefetch_work_function()
685 mutex_unlock(&hdev->mmu_lock); in hl_mmu_prefetch_work_function()
/linux/arch/powerpc/include/asm/
kvm_book3s_64.h
654 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_secondary_pte()
655 "%s called with kvm mmu_lock not held \n", __func__); in find_kvm_secondary_pte()
666 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_host_pte()
667 "%s called with kvm mmu_lock not held \n", __func__); in find_kvm_host_pte()
/linux/Documentation/virt/kvm/
locking.rst
55 - kvm->arch.mmu_lock is an rwlock; critical sections for
57 also take kvm->arch.mmu_lock
268 ``kvm->mmu_lock``
