/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
     361  static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,  in kvmppc_next_nontransitioned_gfn()
     377  * kvmppc_uvmem_slot and memslot.  in kvmppc_next_nontransitioned_gfn()
     392  const struct kvm_memory_slot *memslot, bool merge)  in kvmppc_memslot_page_merge()
     394  unsigned long gfn = memslot->base_gfn;  in kvmppc_memslot_page_merge()
     404  end = start + (memslot->npages << PAGE_SHIFT);  in kvmppc_memslot_page_merge()
     431  const struct kvm_memory_slot *memslot)  in __kvmppc_uvmem_memslot_delete()
     433  uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);  in __kvmppc_uvmem_memslot_delete()
     434  kvmppc_uvmem_slot_free(kvm, memslot);  in __kvmppc_uvmem_memslot_delete()
     435  kvmppc_memslot_page_merge(kvm, memslot, true);  in __kvmppc_uvmem_memslot_delete()
     439  const struct kvm_memory_slot *memslot)  in __kvmppc_uvmem_memslot_create()
     468  struct kvm_memory_slot *memslot, *m;  kvmppc_h_svm_init_start() local
     662  struct kvm_memory_slot *memslot;  kvmppc_h_svm_init_abort() local
     795  kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot)  kvmppc_uv_migrate_mem_slot() argument
     833  struct kvm_memory_slot *memslot;  kvmppc_h_svm_init_done() local
     ...
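A pattern that recurs across these hits is the slot-extent arithmetic: a memslot covers npages pages starting at base_gfn, so its span is npages << PAGE_SHIFT bytes from whatever address corresponds to base_gfn (in kvmppc_memslot_page_merge() that base is the host virtual address from gfn_to_hva()). A minimal userspace sketch of the calculation, with a mocked-up slot struct and illustrative values rather than the kernel's definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB base pages, assumed for illustration */

    /* Cut-down stand-in for the kernel's struct kvm_memory_slot. */
    struct kvm_memory_slot {
        uint64_t base_gfn;   /* first guest frame number covered by the slot */
        uint64_t npages;     /* slot size in base pages */
    };

    int main(void)
    {
        struct kvm_memory_slot slot = { .base_gfn = 0x100, .npages = 512 };

        /* Same shape as the line-404 hit: end = start + (npages << PAGE_SHIFT).
         * Here start is derived from base_gfn; in kvmppc_memslot_page_merge()
         * it is the host virtual address gfn_to_hva() returns for base_gfn. */
        uint64_t start = slot.base_gfn << PAGE_SHIFT;
        uint64_t end = start + (slot.npages << PAGE_SHIFT);

        printf("slot covers [0x%" PRIx64 ", 0x%" PRIx64 ")\n", start, end);
        return 0;
    }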
book3s_64_mmu_hv.c
     206  void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,  in kvmppc_map_vrma()
     220  npages = memslot->npages >> (porder - PAGE_SHIFT);  in kvmppc_map_vrma()
     516  struct kvm_memory_slot *memslot;  kvmppc_book3s_hv_page_fault() local
     579  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_hv_page_fault()
     581  trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);  in kvmppc_book3s_hv_page_fault()
     583  /* No memslot means it's an emulated MMIO region */  in kvmppc_book3s_hv_page_fault()
     584  if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))  in kvmppc_book3s_hv_page_fault()
     592  if (gfn_base < memslot->base_gfn)  in kvmppc_book3s_hv_page_fault()
     604  hva = gfn_to_hva_memslot(memslot, gfn);  in kvmppc_book3s_hv_page_fault()
     742  struct kvm_memory_slot *memslot;  kvmppc_rmap_reset() local
     763  kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn)  kvmppc_unmap_hpte() argument
     804  kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_unmap_rmapp() argument
     856  kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot)  kvmppc_core_flush_memslot_hv() argument
     882  kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_age_rmapp() argument
     953  kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_test_age_rmapp() argument
    1087  kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map)  kvmppc_harvest_vpa_dirty() argument
    1105  kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map)  kvmppc_hv_get_dirty_log_hpt() argument
    1130  struct kvm_memory_slot *memslot;  kvmppc_pin_guest_page() local
    1162  struct kvm_memory_slot *memslot;  kvmppc_unpin_guest_page() local
    1249  struct kvm_memory_slot *memslot = ...  resize_hpt_rehash_hpte() local
     ...
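The line 583-584 hits show how the HPT fault path classifies a fault: no slot, or a slot flagged KVM_MEMSLOT_INVALID (mid-deletion or mid-move), means the access is handled as emulated MMIO rather than backed guest memory. A standalone sketch of that predicate; the flag value below is illustrative, not the kernel's definition:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative value; the kernel keeps KVM_MEMSLOT_INVALID in the
     * internal bits of memslot->flags. */
    #define KVM_MEMSLOT_INVALID (1U << 16)

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
        uint32_t flags;
    };

    /* Mirrors the line 583-584 hits: a fault whose gfn resolves to no slot,
     * or to a slot that is being deleted or moved, is emulated MMIO. */
    static bool fault_is_emulated_mmio(const struct kvm_memory_slot *memslot)
    {
        return !memslot || (memslot->flags & KVM_MEMSLOT_INVALID);
    }

    int main(void)
    {
        struct kvm_memory_slot slot = { .base_gfn = 0, .npages = 16, .flags = 0 };
        printf("%d %d\n", fault_is_emulated_mmio(NULL),
               fault_is_emulated_mmio(&slot));   /* prints: 1 0 */
        return 0;
    }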
book3s_64_mmu_radix.c
     425  const struct kvm_memory_slot *memslot,  kvmppc_unmap_pte() argument
     441  if (!memslot) {  in kvmppc_unmap_pte()
     442  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_unmap_pte()
     443  if (!memslot)  in kvmppc_unmap_pte()
     456  kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);  in kvmppc_unmap_pte()
     458  if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)  in kvmppc_unmap_pte()
     459  kvmppc_update_dirty_map(memslot, gfn, page_size);  in kvmppc_unmap_pte()
     470  * turned off for a memslot while the VM is running. The new memslot
     471  * becomes visible to page faults before the memslot commit ...
     823  kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, pte_t *inserted_pte, unsigned int *levelp)  kvmppc_book3s_instantiate_page() argument
     931  struct kvm_memory_slot *memslot;  kvmppc_book3s_radix_page_fault() local
    1011  kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_unmap_radix() argument
    1030  kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_age_radix() argument
    1058  kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  kvm_test_age_radix() argument
    1078  kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum)  kvm_radix_test_clear_dirty() argument
    1133  kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map)  kvmppc_hv_get_dirty_log_radix() argument
    1158  kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot)  kvmppc_radix_flush_memslot() argument
     ...
book3s_hv_rm_mmu.c
      95  /* Update the dirty bitmap of a memslot */
      96  void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,  kvmppc_update_dirty_map() argument
     101  if (!psize || !memslot->dirty_bitmap)  in kvmppc_update_dirty_map()
     104  gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
     105  set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);  in kvmppc_update_dirty_map()
     112  struct kvm_memory_slot *memslot;  kvmppc_set_dirty_from_hpte() local
     118  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in kvmppc_set_dirty_from_hpte()
     119  if (memslot && memslot->dirty_bitmap)  in kvmppc_set_dirty_from_hpte()
     120  kvmppc_update_dirty_map(memslot, gfn, ...  in kvmppc_set_dirty_from_hpte()
     129  struct kvm_memory_slot *memslot;  revmap_for_hpte() local
     155  struct kvm_memory_slot *memslot;  remove_revmap_chain() local
     193  struct kvm_memory_slot *memslot;  kvmppc_do_h_enter() local
     888  struct kvm_memory_slot *memslot;  kvmppc_get_hpa() local
     928  struct kvm_memory_slot *memslot;  kvmppc_do_h_page_init_zero() local
     ...
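Lines 95-105 are the dirty-map update itself: bail out without a bitmap, convert the page size to a page count, and rebase the absolute gfn to the slot before setting bits. A simplified, non-atomic userspace model of that shape (the kernel's set_dirty_bits_atomic() uses atomic bitops; everything here is mocked up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
        unsigned long *dirty_bitmap;    /* one bit per page in the slot */
    };

    /* Non-atomic stand-in for set_dirty_bits_atomic(): mark npages pages
     * dirty starting at a slot-relative gfn. */
    static void set_dirty_bits(unsigned long *map, uint64_t rel_gfn, uint64_t npages)
    {
        for (uint64_t i = 0; i < npages; i++) {
            uint64_t bit = rel_gfn + i;
            map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
        }
    }

    /* Mirrors the shape of kvmppc_update_dirty_map(): no-op without a
     * bitmap, psize (bytes) becomes a page count, gfn is rebased. */
    static void update_dirty_map(const struct kvm_memory_slot *memslot,
                                 uint64_t gfn, uint64_t psize)
    {
        if (!psize || !memslot->dirty_bitmap)
            return;
        gfn -= memslot->base_gfn;    /* bitmap is indexed from the slot base */
        set_dirty_bits(memslot->dirty_bitmap, gfn, psize >> PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long bitmap[4] = { 0 };
        struct kvm_memory_slot slot = {
            .base_gfn = 0x100, .npages = 256, .dirty_bitmap = bitmap,
        };
        update_dirty_map(&slot, 0x110, 1UL << 16);   /* one 64 KiB page */
        printf("bitmap[0] = 0x%lx\n", bitmap[0]);    /* 0xffff0000 */
        return 0;
    }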
trace_hv.h
     294  struct kvm_memory_slot *memslot, unsigned long ea,
     297  TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
     317  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
     318  __entry->slot_flags = memslot ? memslot->flags : 0;
book3s_hv_nested.c
     805  struct kvm_memory_slot *memslot;  kvmhv_release_all_nested() local
     825  kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))  in kvmhv_release_all_nested()
     826  kvmhv_free_memslot_nest_rmap(memslot);  in kvmhv_release_all_nested()
    1037  const struct kvm_memory_slot *memslot,  kvmhv_remove_nest_rmap_range() argument
    1044  if (!memslot)  in kvmhv_remove_nest_rmap_range()
    1046  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;  in kvmhv_remove_nest_rmap_range()
    1053  unsigned long *rmap = &memslot->arch.rmap[gfn];  in kvmhv_remove_nest_rmap_range()
    1527  struct kvm_memory_slot *memslot;  __kvmhv_nested_page_fault() local
    1594  /* 1. Get the corresponding host memslot */  in __kvmhv_nested_page_fault()
    1596  memslot = ...  in __kvmhv_nested_page_fault()
     ...
book3s.h
      11  struct kvm_memory_slot *memslot);
book3s_64_vio.c
     356  struct kvm_memory_slot *memslot;  kvmppc_tce_to_ua() local
     358  memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);  in kvmppc_tce_to_ua()
     359  if (!memslot)  in kvmppc_tce_to_ua()
     362  *ua = __gfn_to_hva_memslot(memslot, gfn) |  in kvmppc_tce_to_ua()
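The line 362 hit translates a guest address to a host userspace address through the slot: page translation via __gfn_to_hva_memslot(), with the in-page offset OR'd back in (the OR'd tail is cut off in the hit). A self-contained sketch of the same calculation; the example addresses are made up:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;    /* host VA backing the slot */
    };

    /* Same calculation as the kernel's __gfn_to_hva_memslot(): the gfn's
     * offset within the slot, scaled to bytes, added to the host mapping. */
    static uint64_t gfn_to_hva_memslot(const struct kvm_memory_slot *slot,
                                       uint64_t gfn)
    {
        return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
    }

    int main(void)
    {
        struct kvm_memory_slot slot = {
            .base_gfn = 0x100, .npages = 256, .userspace_addr = 0x7f0000000000,
        };
        uint64_t gpa = (0x105ULL << PAGE_SHIFT) | 0x42a;

        /* Line-362 shape: page translation through the slot, then the
         * in-page offset OR'd back in. */
        uint64_t ua = gfn_to_hva_memslot(&slot, gpa >> PAGE_SHIFT) |
                      (gpa & ~PAGE_MASK);
        printf("gpa 0x%" PRIx64 " -> ua 0x%" PRIx64 "\n", gpa, ua);
        return 0;
    }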
book3s_hv.c
     969  /* Copy guest memory in place - must reside within a single memslot */
    5266  struct kvm_memory_slot *memslot;  kvm_vm_ioctl_get_dirty_log_hv() local
    5279  memslot = id_to_memslot(slots, log->slot);  in kvm_vm_ioctl_get_dirty_log_hv()
    5281  if (!memslot || !memslot->dirty_bitmap)  in kvm_vm_ioctl_get_dirty_log_hv()
    5288  n = kvm_dirty_bitmap_bytes(memslot);  in kvm_vm_ioctl_get_dirty_log_hv()
    5289  buf = memslot->dirty_bitmap + n / sizeof(long);  in kvm_vm_ioctl_get_dirty_log_hv()
    5293  r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);  in kvm_vm_ioctl_get_dirty_log_hv()
    5295  r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);  in kvm_vm_ioctl_get_dirty_log_hv()
    5301  * memslot ...  in kvm_vm_ioctl_get_dirty_log_hv()
    5475  struct kvm_memory_slot *memslot;  kvmppc_hv_setup_htab_rma() local
    6434  struct kvm_memory_slot *memslot;  kvmhv_svm_off() local
     ...
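Line 5289 is worth a note: buf points into the second half of the slot's dirty bitmap. KVM sizes the allocation at twice kvm_dirty_bitmap_bytes() so the tail half can serve as a harvest buffer for the dirty-log ioctl. A rough userspace illustration of that layout, under those assumptions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Bytes for one dirty bit per page, rounded up to whole longs, mirroring
     * what kvm_dirty_bitmap_bytes() derives from memslot->npages. */
    static size_t dirty_bitmap_bytes(uint64_t npages)
    {
        return ((npages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long);
    }

    int main(void)
    {
        uint64_t npages = 1024;
        size_t n = dirty_bitmap_bytes(npages);

        /* KVM allocates 2*n: the first half is the live bitmap, the second
         * half (dirty_bitmap + n / sizeof(long), the line-5289 hit) is a
         * scratch buffer the dirty-log ioctl harvests into. */
        unsigned long *dirty_bitmap = calloc(1, 2 * n);
        unsigned long *buf = dirty_bitmap + n / sizeof(unsigned long);

        memcpy(buf, dirty_bitmap, n);    /* stand-in for the harvest step */
        printf("bitmap bytes: %zu (allocated twice over)\n", n);
        free(dirty_bitmap);
        return 0;
    }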
book3s.c
     848  void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)  kvm_arch_sync_dirty_log() argument
     863  void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)  kvmppc_core_flush_memslot() argument
     865  kvm->arch.kvm_ops->flush_memslot(kvm, memslot);  in kvmppc_core_flush_memslot()
/linux/arch/arm64/kvm/
mmu.c
     163  static bool memslot_is_logging(struct kvm_memory_slot *memslot)  memslot_is_logging() argument
     165  return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);  in memslot_is_logging()
     351  struct kvm_memory_slot *memslot)  stage2_flush_memslot() argument
     353  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;  in stage2_flush_memslot()
     354  phys_addr_t end = addr + PAGE_SIZE * memslot->npages;  in stage2_flush_memslot()
     369  struct kvm_memory_slot *memslot;  stage2_flush_vm() local
     376  kvm_for_each_memslot(memslot, bkt, slots)  in stage2_flush_vm()
     377  stage2_flush_memslot(kvm, memslot);  in stage2_flush_vm()
     995  struct kvm_memory_slot *memslot)  stage2_unmap_memslot() argument
    1046  struct kvm_memory_slot *memslot;  stage2_unmap_vm() local
    1204  struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);  kvm_mmu_wp_memory_region() local
    1232  struct kvm_memory_slot *memslot;  kvm_mmu_split_memory_region() local
    1290  fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size)  fault_supports_stage2_huge_mapping() argument
    1364  transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap)  transparent_hugepage_adjust() argument
    1482  user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_s2_trans *nested, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm)  user_mem_abort() argument
    1831  struct kvm_memory_slot *memslot;  kvm_handle_guest_abort() local
     ...
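Line 165 defines what "logging" means for an arm64 slot: a dirty bitmap exists and the slot is writable. A sketch of the predicate plus the per-slot walk visible in stage2_flush_vm(); the KVM_MEM_READONLY value matches the KVM UAPI, the rest of the struct is mocked up:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KVM_MEM_READONLY (1U << 1)    /* matches the KVM UAPI flag */

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
        uint32_t flags;
        unsigned long *dirty_bitmap;
    };

    /* Line-165 hit: logging is active iff a dirty bitmap exists and the
     * slot can be written at all (a read-only slot is never dirtied). */
    static bool memslot_is_logging(const struct kvm_memory_slot *memslot)
    {
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
    }

    int main(void)
    {
        unsigned long bm[1] = { 0 };
        struct kvm_memory_slot slots[] = {
            { .base_gfn = 0x000, .npages = 16, .dirty_bitmap = bm },
            { .base_gfn = 0x100, .npages = 16, .dirty_bitmap = bm,
              .flags = KVM_MEM_READONLY },
        };

        /* Stand-in for the kvm_for_each_memslot() walk in stage2_flush_vm(). */
        for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
            printf("slot %zu logging=%d\n", i, memslot_is_logging(&slots[i]));
        return 0;
    }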
/linux/virt/kvm/
dirty_ring.c
      55  struct kvm_memory_slot *memslot;  kvm_reset_dirty_gfn() local
      64  memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);  in kvm_reset_dirty_gfn()
      66  if (!memslot || (offset + __fls(mask)) >= memslot->npages)  in kvm_reset_dirty_gfn()
      70  kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);  in kvm_reset_dirty_gfn()
     129  * the various memslot accesses.  in kvm_dirty_ring_reset()
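The line 66 bounds check reads compactly: offset + __fls(mask) is the highest slot-relative page the ring entry touches, and it must still fall inside the slot. A standalone sketch using a portable stand-in for __fls(), assuming a nonzero mask:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct kvm_memory_slot {
        uint64_t npages;
    };

    /* Portable stand-in for the kernel's __fls(): index of the highest set
     * bit; mask must be nonzero (__builtin_clzl(0) is undefined). */
    static unsigned int fls_long(unsigned long mask)
    {
        return 8 * sizeof(mask) - 1 - (unsigned int)__builtin_clzl(mask);
    }

    /* Line-66 shape: a ring entry covers slot-relative pages
     * [offset, offset + __fls(mask)]; reject it if that spills past the slot. */
    static bool reset_in_bounds(const struct kvm_memory_slot *memslot,
                                uint64_t offset, unsigned long mask)
    {
        return memslot && (offset + fls_long(mask)) < memslot->npages;
    }

    int main(void)
    {
        struct kvm_memory_slot slot = { .npages = 64 };
        printf("%d\n", reset_in_bounds(&slot, 60, 0x9));   /* page 63: in bounds */
        printf("%d\n", reset_in_bounds(&slot, 61, 0x9));   /* page 64: rejected */
        return 0;
    }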
kvm_main.c
     328  const struct kvm_memory_slot *memslot)  kvm_flush_remote_tlbs_memslot() argument
     331  * All current use cases for flushing the TLBs for a specific memslot  in kvm_flush_remote_tlbs_memslot()
     333  * mmu_lock. The interaction between the various operations on memslot  in kvm_flush_remote_tlbs_memslot()
     335  * operation is observed by any other operation on the same memslot.  in kvm_flush_remote_tlbs_memslot()
     338  kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);  in kvm_flush_remote_tlbs_memslot()
     532  * least one memslot was found, i.e. if the handler found guest memory.
     555  /* Iterate over each memslot intersecting [start, last] (inclusive) range */
     742  * Prevent memslot modification between range_start() and range_end()  in kvm_mmu_notifier_invalidate_range_start()
     758  * i.e. don't need to rely on memslot overlap ...  in kvm_mmu_notifier_invalidate_range_start()
     944  kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)  kvm_destroy_dirty_bitmap() argument
     969  struct kvm_memory_slot *memslot;  kvm_free_memslots() local
    1434  kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)  kvm_alloc_dirty_bitmap() argument
    2157  kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot)  kvm_get_dirty_log() argument
    2222  struct kvm_memory_slot *memslot;  kvm_get_dirty_log_protect() local
    2333  struct kvm_memory_slot *memslot;  kvm_clear_dirty_log_protect() local
    2667  struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);  kvm_is_visible_gfn() local
    2675  struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);  kvm_vcpu_is_visible_gfn() local
    3287  __kvm_write_guest_page(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len)  __kvm_write_guest_page() argument
    3511  mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)  mark_page_dirty_in_slot() argument
    3537  struct kvm_memory_slot *memslot;  mark_page_dirty() local
    3546  struct kvm_memory_slot *memslot;  kvm_vcpu_mark_page_dirty() local
     ...
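The __kvm_write_guest_page() hit at 3287 ties the other hits together: resolve the gfn through a slot, copy through the host mapping, then mark_page_dirty_in_slot(). A greatly simplified userspace model of that flow (the kernel resolves an hva and uses copy_to_user(), not a flat array; everything here is a sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT 12
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
        uint8_t *host_mem;            /* backing memory, stands in for the hva */
        unsigned long *dirty_bitmap;
    };

    /* Model of __kvm_write_guest_page() + mark_page_dirty_in_slot():
     * resolve the page through the slot, copy, set the slot-relative bit. */
    static int write_guest_page(struct kvm_memory_slot *slot, uint64_t gfn,
                                const void *data, int offset, int len)
    {
        if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
            return -1;    /* gfn not covered by this slot */

        uint64_t rel_gfn = gfn - slot->base_gfn;
        memcpy(slot->host_mem + (rel_gfn << PAGE_SHIFT) + offset, data, len);

        if (slot->dirty_bitmap)
            slot->dirty_bitmap[rel_gfn / BITS_PER_LONG] |=
                1UL << (rel_gfn % BITS_PER_LONG);
        return 0;
    }

    int main(void)
    {
        static uint8_t mem[4 << PAGE_SHIFT];
        unsigned long bm[1] = { 0 };
        struct kvm_memory_slot slot = { .base_gfn = 8, .npages = 4,
                                        .host_mem = mem, .dirty_bitmap = bm };
        write_guest_page(&slot, 9, "hi", 0, 2);
        printf("dirty bitmap: 0x%lx, first byte: %c\n", bm[0],
               mem[1 << PAGE_SHIFT]);    /* 0x2, 'h' */
        return 0;
    }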
pfncache.c
      81  * If the page was cached from a memslot, make sure the memslots have  in kvm_gpc_check()
     165  .slot = gpc->memslot,  in hva_to_pfn_retry()
     287  gpc->memslot = NULL;  in __kvm_gpc_refresh()
     303  gpc->memslot = __gfn_to_memslot(slots, gfn);  in __kvm_gpc_refresh()
     304  gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);  in __kvm_gpc_refresh()
     312  * Even if the GPA and/or the memslot generation changed, the  in __kvm_gpc_refresh()
     345  * Some/all of the uhva, gpa, and memslot generation info may still be  in __kvm_gpc_refresh()
     467  * memslot generation. The PFN lookup needs to be redone every  in kvm_gpc_deactivate()
/linux/arch/riscv/kvm/
vcpu_exit.c
      19  struct kvm_memory_slot *memslot;  gstage_page_fault() local
      27  memslot = gfn_to_memslot(vcpu->kvm, gfn);  in gstage_page_fault()
      28  hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);  in gstage_page_fault()
      46  ret = kvm_riscv_mmu_map(vcpu, memslot, fault_addr, hva,  in gstage_page_fault()
/linux/arch/powerpc/include/asm/
kvm_book3s.h
     198  const struct kvm_memory_slot *memslot,
     205  struct kvm_memory_slot *memslot,
     214  extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
     216  extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
     218  extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
     221  struct kvm_memory_slot *memslot, unsigned long *map);
     223  const struct kvm_memory_slot *memslot);
     241  extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
     258  struct kvm_memory_slot *memslot, unsigned long *map);
     260  struct kvm_memory_slot *memslot,
     ...
kvm_book3s_64.h
      66  * rmap entry in the memslot. The list is always terminated by a "single entry"
      68  * a single entry then this is itself in the rmap entry of the memslot, not a
     488  static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,  slot_is_aligned() argument
     495  return !(memslot->base_gfn & mask) && !(memslot->npages & mask);  in slot_is_aligned()
     635  const struct kvm_memory_slot *memslot,
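The two slot_is_aligned() hits show the signature and the return statement; the middle is cut off. A self-contained version with a worked example. The mask derivation below is reconstructed to be consistent with the visible return statement (large pages expressed in small pages, minus one), so treat it as a sketch rather than a verbatim copy:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
    };

    /* Line 488-495 shape: a slot can be mapped with large pages of size
     * `pagesize` only if both its start gfn and its length are multiples
     * of that page size, counted in small pages. */
    static bool slot_is_aligned(const struct kvm_memory_slot *memslot,
                                unsigned long pagesize)
    {
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
            return true;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
    }

    int main(void)
    {
        struct kvm_memory_slot slot = { .base_gfn = 0x4000, .npages = 0x4000 };
        /* 16 MiB pages are 0x1000 small pages; 0x4000 is a multiple. */
        printf("16M-aligned: %d\n", slot_is_aligned(&slot, 16UL << 20));
        /* An odd base_gfn can still be backed by 4 KiB pages. */
        slot.base_gfn = 0x4001;
        printf("4K-aligned:  %d\n", slot_is_aligned(&slot, 4096));
        return 0;
    }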
kvm_ppc.h
     174  struct kvm_memory_slot *memslot, unsigned long porder);
     217  struct kvm_memory_slot *memslot);
     278  void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
     ...
/linux/include/linux/
kvm_types.h
      57  struct kvm_memory_slot *memslot;  member
      64  struct kvm_memory_slot *memslot;  member
/linux/arch/riscv/include/asm/
kvm_mmu.h
      14  int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
/linux/arch/x86/include/asm/uv/
uv_geo.h
      57  char memslot;  /* The memory slot on the bus */  member
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
     507  * read-only memslots as MMIO, and creating a read-only memslot for the  in __vm_create()
    1118  * Install a unique fd for each memslot so that the fd  in vm_mem_add()
    1184  * memslot - KVM memory slot ID
    1190  * using kvm memory slot ID given by memslot. TEST_ASSERT failure
    1191  * on error (e.g. currently no memory region using memslot as a KVM
    1195  memslot2region(struct kvm_vm *vm, uint32_t memslot)  memslot2region() argument
    1200  memslot)  in memslot2region()
    1201  if (region->region.slot == memslot)  in memslot2region()
    1205  " requested slot: %u\n", memslot);  in memslot2region()
    2105  * memslot ...
    2119  __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot, bool protected)  __vm_phy_pages_alloc() argument
    2165  vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot)  vm_phy_page_alloc() argument
     ...
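memslot2region() (hits 1195-1205) maps a KVM slot ID back to the selftest's bookkeeping region and fails loudly if no region uses that ID. A cut-down model of the lookup; the real version walks a hash bucket over the VM's region list and reports failure through TEST_ASSERT, so the list and error handling here are simplifications:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down model of the selftest's region bookkeeping: each region
     * remembers the KVM slot ID it was registered under. */
    struct userspace_mem_region {
        uint32_t slot;
        struct userspace_mem_region *next;
    };

    /* Same shape as memslot2region(): walk the regions, match the slot ID,
     * fail loudly if nothing matches (mirroring the line-1205 message). */
    static struct userspace_mem_region *
    memslot2region(struct userspace_mem_region *head, uint32_t memslot)
    {
        for (struct userspace_mem_region *r = head; r; r = r->next)
            if (r->slot == memslot)
                return r;
        fprintf(stderr, "No vm physical memory at requested slot: %u\n", memslot);
        exit(1);
    }

    int main(void)
    {
        struct userspace_mem_region r1 = { .slot = 1, .next = NULL };
        struct userspace_mem_region r0 = { .slot = 0, .next = &r1 };
        printf("found slot %u\n", memslot2region(&r0, 1)->slot);
        return 0;
    }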
/linux/arch/x86/kvm/mmu/
mmu.c
    1431  * of memslot has no such restriction, so the range can cross two large  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1650  * (and mmu_invalidate_seq). The only exception is memslot deletion;  in kvm_unmap_gfn_range()
    2040  * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the  in kvm_sync_page_check()
    3236  * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()  in host_pfn_mapping_level()
    3241  * read-only memslot.  in host_pfn_mapping_level()
    3958  * Check if memslot metadata actually needs to be allocated, e.g. all  in mmu_first_shadow_root_alloc()
    3973  * were made now versus when the memslot was created.  in mmu_first_shadow_root_alloc()
    4650  * Retry the page fault if the gfn hit a memslot that is being deleted  in kvm_mmu_faultin_pfn()
    4651  * or moved. This ensures any existing SPTEs for the old memslot will  in kvm_mmu_faultin_pfn()
    4733  * root was invalidated by a memslot update ...
    6784  const struct kvm_memory_slot *memslot;  kvm_rmap_zap_gfn_range() local
    6850  kvm_mmu_slot_remove_write_access(struct kvm *kvm, const struct kvm_memory_slot *memslot, int start_level)  kvm_mmu_slot_remove_write_access() argument
    7098  kvm_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, u64 start, u64 end, int target_level)  kvm_mmu_try_split_huge_pages() argument
    7117  kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, int target_level)  kvm_mmu_slot_try_split_huge_pages() argument
    7213  kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot)  kvm_mmu_slot_leaf_clear_dirty() argument
     ...
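The kvm_rmap_zap_gfn_range() hit at 6784 belongs to a walk over every slot that intersects a gfn range. A speculative, simplified model of the per-slot clamping such a walk has to perform; the names and structure below are mine for illustration, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    struct kvm_memory_slot {
        uint64_t base_gfn;
        uint64_t npages;
    };

    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    /* Illustrative clamping step: process only the overlap of the requested
     * zap range [start, end) with one slot's gfn coverage. */
    static void zap_range_in_slot(const struct kvm_memory_slot *slot,
                                  uint64_t start, uint64_t end)
    {
        uint64_t slot_end = slot->base_gfn + slot->npages;
        uint64_t lo = max_u64(start, slot->base_gfn);
        uint64_t hi = min_u64(end, slot_end);

        if (lo >= hi)
            return;    /* no overlap with this slot */
        printf("zap gfns [0x%llx, 0x%llx)\n",
               (unsigned long long)lo, (unsigned long long)hi);
    }

    int main(void)
    {
        struct kvm_memory_slot slots[] = {
            { .base_gfn = 0x000, .npages = 0x100 },
            { .base_gfn = 0x400, .npages = 0x100 },
        };
        for (int i = 0; i < 2; i++)
            zap_range_in_slot(&slots[i], 0x080, 0x480);
        return 0;
    }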
/linux/tools/testing/selftests/kvm/lib/x86/
vmx.c
     498  uint32_t memslot)  nested_map_memslot() argument
     502  memslot2region(vm, memslot);  in nested_map_memslot()
/linux/arch/x86/include/asm/
kvm_host.h
    1369  * memslot, etc... Note, zapping shadow pages on this list doesn't
    1542  * is used as one input when determining whether certain memslot
    2042  const struct kvm_memory_slot *memslot,
    2045  const struct kvm_memory_slot *memslot,
    2048  const struct kvm_memory_slot *memslot,
    2052  const struct kvm_memory_slot *memslot);
    2054  const struct kvm_memory_slot *memslot);