
Searched refs: gpa (Results 1 – 25 of 129), sorted by relevance


/linux/tools/testing/selftests/kvm/x86/
private_mem_conversions_test.c
30 #define memcmp_g(gpa, pattern, size) \ argument
32 uint8_t *mem = (uint8_t *)gpa; \
38 pattern, i, gpa + i, mem[i]); \
41 static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size) in memcmp_h() argument
48 pattern, gpa + i, mem[i]); in memcmp_h()
73 static void guest_sync_shared(uint64_t gpa, uint64_t size, in guest_sync_shared() argument
76 GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern); in guest_sync_shared()
79 static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) in guest_sync_private() argument
81 GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern); in guest_sync_private()
89 static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, in guest_map_mem() argument
[all …]
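The memcmp_g()/memcmp_h() excerpts above verify that a stretch of guest memory holds a repeated byte pattern and report the first mismatching offset. A minimal standalone sketch of that check, assuming nothing from the test itself (check_pattern() and the local buffer are illustrative stand-ins, not the test's actual helpers):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Verify that size bytes at mem (backing guest address gpa) all equal
 * pattern, reporting the first mismatch, as memcmp_g()/memcmp_h() do. */
static void check_pattern(uint8_t *mem, uint64_t gpa, uint8_t pattern,
                          size_t size)
{
        size_t i;

        for (i = 0; i < size; i++) {
                if (mem[i] != pattern) {
                        fprintf(stderr,
                                "expected 0x%x at gpa 0x%llx, got 0x%x\n",
                                pattern,
                                (unsigned long long)(gpa + i), mem[i]);
                        exit(1);
                }
        }
}

int main(void)
{
        uint8_t buf[4096];

        memset(buf, 0xaa, sizeof(buf));
        check_pattern(buf, 0x10000, 0xaa, sizeof(buf));
        puts("pattern ok");
        return 0;
}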
smaller_maxphyaddr_emulation_test.c
52 uint64_t gpa; in main() local
69 gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE, in main()
71 TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc"); in main()
/linux/tools/testing/selftests/kvm/
mmu_stress_test.c
25 uint64_t gpa; in guest_code() local
29 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) in guest_code()
30 vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); in guest_code()
34 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) in guest_code()
35 *((volatile uint64_t *)gpa); in guest_code()
52 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) in guest_code()
54 asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */ in guest_code()
56 asm volatile("str %0, [%0]" :: "r" (gpa) : "memory"); in guest_code()
58 vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); in guest_code()
70 for (gpa = start_gpa; gpa < end_gpa; gpa += stride) in guest_code()
[all …]
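A userspace sketch of the guest_code() access pattern above: walk a region in fixed strides, store each address's own value through a volatile pointer, then read every page back. The static buffer and 4 KiB STRIDE are assumptions standing in for guest physical memory and the test's stride:

#include <stdint.h>
#include <stdio.h>

#define STRIDE 4096

/* Stand-in for guest physical memory; uint64_t elements keep the
 * 8-byte stores aligned. */
static uint64_t guest_mem[4 * STRIDE / sizeof(uint64_t)];

int main(void)
{
        uintptr_t start = (uintptr_t)guest_mem;
        uintptr_t end = start + sizeof(guest_mem);
        uintptr_t gpa;

        /* Write pass: each page's first qword records its own address. */
        for (gpa = start; gpa < end; gpa += STRIDE)
                *(volatile uint64_t *)gpa = gpa;

        /* Read pass: touch every page again, as the test's read loop does. */
        for (gpa = start; gpa < end; gpa += STRIDE)
                (void)*(volatile uint64_t *)gpa;

        printf("page 0 holds 0x%llx\n", (unsigned long long)guest_mem[0]);
        return 0;
}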
memslot_perf_test.c
188 static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) in vm_gpa2hva() argument
195 TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); in vm_gpa2hva()
196 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, in vm_gpa2hva()
198 gpa -= MEM_GPA; in vm_gpa2hva()
200 gpage = gpa / guest_page_size; in vm_gpa2hva()
201 pgoffs = gpa % guest_page_size; in vm_gpa2hva()
334 uint64_t gpa; in prepare_vm() local
340 gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot); in prepare_vm()
341 TEST_ASSERT(gpa == guest_addr, in prepare_vm()
640 uint64_t gpa, ctr; in test_memslot_do_unmap() local
[all …]
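vm_gpa2hva() above translates a guest physical address to a host virtual address by bounds-checking against the region base, subtracting it, and splitting the remainder into a page index and an in-page offset. A minimal sketch of that arithmetic (MEM_GPA, GUEST_PAGE_SIZE, and the backing array are illustrative stand-ins, not the test's actual definitions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical base GPA and page size of the memslot under test. */
#define MEM_GPA         0x10000000ULL
#define GUEST_PAGE_SIZE 4096ULL

/* Pretend host mapping of the guest region, for illustration only. */
static uint8_t backing[16 * GUEST_PAGE_SIZE];

/* Translate a GPA inside the region to a host pointer, mirroring the
 * bounds check + divide/modulo pattern in vm_gpa2hva(). */
static void *gpa2hva(uint64_t gpa)
{
        uint64_t gpage, pgoffs;

        assert(gpa >= MEM_GPA);                  /* "Too low gpa" check */
        assert(gpa < MEM_GPA + sizeof(backing)); /* upper-bound check */

        gpa -= MEM_GPA;                          /* offset into the region */
        gpage = gpa / GUEST_PAGE_SIZE;           /* guest page index */
        pgoffs = gpa % GUEST_PAGE_SIZE;          /* offset within the page */

        return backing + gpage * GUEST_PAGE_SIZE + pgoffs;
}

int main(void)
{
        void *hva = gpa2hva(MEM_GPA + 5 * GUEST_PAGE_SIZE + 42);

        printf("hva = %p\n", hva);
        return 0;
}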
memslot_modification_stress_test.c
60 uint64_t gpa; in add_remove_memslot() local
67 gpa = memstress_args.gpa - pages * vm->page_size; in add_remove_memslot()
71 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, in add_remove_memslot()
/linux/arch/powerpc/kvm/
book3s_64_mmu_radix.c
149 u64 pte, base, gpa; in kvmppc_mmu_walk_radix_tree() local
204 gpa = pte & 0x01fffffffffff000ul; in kvmppc_mmu_walk_radix_tree()
205 if (gpa & ((1ul << offset) - 1)) in kvmppc_mmu_walk_radix_tree()
207 gpa |= eaddr & ((1ul << offset) - 1); in kvmppc_mmu_walk_radix_tree()
215 gpte->raddr = gpa; in kvmppc_mmu_walk_radix_tree()
423 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, in kvmppc_unmap_pte() argument
430 unsigned long gfn = gpa >> PAGE_SHIFT; in kvmppc_unmap_pte()
434 old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); in kvmppc_unmap_pte()
435 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); in kvmppc_unmap_pte()
454 gpa &= ~(page_size - 1); in kvmppc_unmap_pte()
[all …]
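In the radix-walk excerpt above, the guest physical address is assembled from two sources: the PTE's real-page-number field (the 0x01fffffffffff000 mask) supplies the high bits, and the low `offset` bits come from the effective address; a PTE whose RPN has low bits set for this page size is rejected. A standalone sketch of that splice:

#include <stdint.h>
#include <stdio.h>

/* High bits: the PTE's real-page-number field. Low `offset` bits (the
 * page offset for this page size): taken from the effective address. */
static uint64_t pte_to_gpa(uint64_t pte, uint64_t eaddr, unsigned int offset)
{
        uint64_t gpa = pte & 0x01fffffffffff000ULL;  /* RPN field */

        /* The kernel walker rejects the PTE if these bits are nonzero. */
        if (gpa & ((1ULL << offset) - 1))
                return UINT64_MAX;                   /* invalid for this size */

        gpa |= eaddr & ((1ULL << offset) - 1);       /* splice in page offset */
        return gpa;
}

int main(void)
{
        /* 64 KiB page: the low 16 bits come from eaddr. */
        printf("gpa = 0x%llx\n",
               (unsigned long long)pte_to_gpa(0x0000000123450000ULL,
                                              0x000000000000beefULL, 16));
        return 0;
}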
book3s_hv_uvmem.c
234 unsigned long gpa; member
516 struct kvm *kvm, unsigned long gpa, struct page *fault_page) in __kvmppc_svm_page_out() argument
536 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) in __kvmppc_svm_page_out()
569 gpa, 0, page_shift); in __kvmppc_svm_page_out()
589 struct kvm *kvm, unsigned long gpa, in kvmppc_svm_page_out() argument
595 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, in kvmppc_svm_page_out()
645 PAGE_SHIFT, kvm, pvt->gpa, NULL)) in kvmppc_uvmem_drop_pages()
647 pvt->gpa, addr); in kvmppc_uvmem_drop_pages()
695 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) in kvmppc_uvmem_get_page() argument
719 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); in kvmppc_uvmem_get_page()
[all …]
/linux/virt/kvm/
pfncache.c
60 static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva, in kvm_gpc_is_valid_len() argument
63 unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) : in kvm_gpc_is_valid_len()
64 offset_in_page(gpa); in kvm_gpc_is_valid_len()
84 if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation) in kvm_gpc_check()
90 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len)) in kvm_gpc_check()
166 .gfn = gpa_to_gfn(gpc->gpa), in hva_to_pfn_retry()
256 static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva) in __kvm_gpc_refresh() argument
267 if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva))) in __kvm_gpc_refresh()
283 if (kvm_is_error_gpa(gpa)) { in __kvm_gpc_refresh()
286 gpc->gpa = INVALID_GPA; in __kvm_gpc_refresh()
[all …]
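kvm_gpc_is_valid_len() above derives the in-page offset of the cached address and validates the requested length against it. A sketch of the invariant this appears to enforce, assuming the pfn cache pins exactly one page (offset_in_page() is redefined here for userspace):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Userspace stand-in for the kernel's offset_in_page() macro. */
#define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

/* The mapping of [addr, addr + len) must not cross a page boundary,
 * since only one page frame is pinned. */
static bool span_is_single_page(unsigned long addr, unsigned long len)
{
        unsigned long offset = offset_in_page(addr);

        return offset + len <= PAGE_SIZE;
}

int main(void)
{
        printf("%d\n", span_is_single_page(0x1000, 4096)); /* 1: one full page */
        printf("%d\n", span_is_single_page(0x1800, 4096)); /* 0: crosses a boundary */
        return 0;
}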
/linux/arch/s390/kvm/
gaccess.h
152 unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); in write_guest_lc() local
154 return kvm_write_guest(vcpu->kvm, gpa, data, len); in write_guest_lc()
178 unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); in read_guest_lc() local
180 return kvm_read_guest(vcpu->kvm, gpa, data, len); in read_guest_lc()
190 unsigned long *gpa, enum gacc_mode mode,
196 int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
199 int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
209 int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
371 int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, in write_guest_abs() argument
374 return kvm_write_guest(vcpu->kvm, gpa, data, len); in write_guest_abs()
[all …]
vsie.c
692 static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa) in pin_guest_page() argument
696 page = gfn_to_page(kvm, gpa_to_gfn(gpa)); in pin_guest_page()
699 *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK); in pin_guest_page()
704 static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa) in unpin_guest_page() argument
708 mark_page_dirty(kvm, gpa_to_gfn(gpa)); in unpin_guest_page()
773 gpa_t gpa; in pin_blocks() local
776 gpa = READ_ONCE(scb_o->scaol) & ~0xfUL; in pin_blocks()
778 gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32; in pin_blocks()
779 if (gpa) { in pin_blocks()
780 if (gpa < 2 * PAGE_SIZE) in pin_blocks()
[all …]
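pin_guest_page() above resolves a gpa to a host physical address as the pinned frame's physical base plus the gpa's offset within its page (gpa & ~PAGE_MASK, using the kernel's PAGE_MASK convention). A standalone sketch, with page_phys as a hypothetical stand-in for page_to_phys(page):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))    /* kernel convention */

/* Host physical address = pinned frame base + in-page offset of the GPA. */
static uint64_t gpa_to_hpa(uint64_t page_phys, uint64_t gpa)
{
        return page_phys + (gpa & ~PAGE_MASK);
}

int main(void)
{
        /* GPA 0x123456 whose page is pinned at host physical 0xabc000. */
        printf("hpa = 0x%llx\n",
               (unsigned long long)gpa_to_hpa(0xabc000ULL, 0x123456ULL));
        return 0;
}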
gaccess.c
435 static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) in deref_table() argument
437 return kvm_read_guest(kvm, gpa, val, sizeof(*val)); in deref_table()
462 unsigned long *gpa, const union asce asce, in guest_translate() argument
621 *gpa = raddr.addr; in guest_translate()
645 enum gacc_mode mode, gpa_t gpa) in vm_check_access_key() argument
655 hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in vm_check_access_key()
708 enum gacc_mode mode, union asce asce, gpa_t gpa, in vcpu_check_access_key() argument
722 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa)); in vcpu_check_access_key()
791 unsigned long gpa; in guest_range_to_gpas() local
801 rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot); in guest_range_to_gpas()
[all …]
/linux/arch/x86/include/asm/uv/
uv_hub.h
461 uv_gpa_in_mmr_space(unsigned long gpa) in uv_gpa_in_mmr_space() argument
463 return (gpa >> 62) == 0x3UL; in uv_gpa_in_mmr_space()
467 static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa) in uv_gpa_to_soc_phys_ram() argument
475 gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) | in uv_gpa_to_soc_phys_ram()
476 ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val); in uv_gpa_to_soc_phys_ram()
478 paddr = gpa & uv_hub_info->gpa_mask; in uv_gpa_to_soc_phys_ram()
485 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa) in uv_gpa_to_gnode() argument
490 return gpa >> n_lshift; in uv_gpa_to_gnode()
492 return uv_gam_range(gpa)->nasid >> 1; in uv_gpa_to_gnode()
496 static inline int uv_gpa_to_pnode(unsigned long gpa) in uv_gpa_to_pnode() argument
[all …]
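uv_gpa_in_mmr_space() above classifies a UV global physical address by its top two bits. A trivial standalone sketch of that test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MMR space is flagged by the top two bits of the global address. */
static bool gpa_in_mmr_space(uint64_t gpa)
{
        return (gpa >> 62) == 0x3ULL;
}

int main(void)
{
        printf("%d\n", gpa_in_mmr_space(0xC000000000000000ULL)); /* 1 */
        printf("%d\n", gpa_in_mmr_space(0x0000000000001000ULL)); /* 0 */
        return 0;
}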
/linux/arch/x86/kvm/mmu/
page_track.h
30 void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
41 static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, in __kvm_page_track_write() argument
50 static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, in kvm_page_track_write() argument
53 __kvm_page_track_write(vcpu->kvm, gpa, new, bytes); in kvm_page_track_write()
55 kvm_mmu_track_write(vcpu, gpa, new, bytes); in kvm_page_track_write()
/linux/drivers/gpu/drm/i915/gvt/
page_track.c
159 int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, in intel_vgpu_page_track_handler() argument
165 page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); in intel_vgpu_page_track_handler()
171 intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); in intel_vgpu_page_track_handler()
173 ret = page_track->handler(page_track, gpa, data, bytes); in intel_vgpu_page_track_handler()
175 gvt_err("guest page write error, gpa %llx\n", gpa); in intel_vgpu_page_track_handler()
/linux/arch/x86/kvm/
mmu.h
103 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
260 bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
261 int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
294 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
299 gpa_t gpa, u64 access, in kvm_translate_gpa() argument
303 return gpa; in kvm_translate_gpa()
304 return translate_nested_gpa(vcpu, gpa, access, exception); in kvm_translate_gpa()
317 static inline bool kvm_is_addr_direct(struct kvm *kvm, gpa_t gpa) in kvm_is_addr_direct() argument
321 return !gpa_direct_bits || (gpa & gpa_direct_bits); in kvm_is_addr_direct()
cpuid.h
69 static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) in kvm_vcpu_is_legal_gpa() argument
71 return !(gpa & vcpu->arch.reserved_gpa_bits); in kvm_vcpu_is_legal_gpa()
75 gpa_t gpa, gpa_t alignment) in kvm_vcpu_is_legal_aligned_gpa() argument
77 return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa); in kvm_vcpu_is_legal_aligned_gpa()
80 static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) in page_address_valid() argument
82 return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE); in page_address_valid()
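The cpuid.h excerpt above defines GPA legality as "no reserved bits set" and layers an alignment check on top. A sketch of both predicates, assuming a hypothetical 48-bit guest physical address width for reserved_gpa_bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
/* Userspace stand-in for the kernel's IS_ALIGNED() helper. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* reserved_gpa_bits is everything above MAXPHYADDR; assume 48 bits. */
static const uint64_t reserved_gpa_bits = ~((1ULL << 48) - 1);

static bool gpa_is_legal(uint64_t gpa)
{
        return !(gpa & reserved_gpa_bits);       /* no reserved bits set */
}

static bool gpa_is_legal_aligned(uint64_t gpa, uint64_t alignment)
{
        return IS_ALIGNED(gpa, alignment) && gpa_is_legal(gpa);
}

int main(void)
{
        printf("%d\n", gpa_is_legal_aligned(0x1000, PAGE_SIZE));     /* 1 */
        printf("%d\n", gpa_is_legal_aligned(1ULL << 50, PAGE_SIZE)); /* 0 */
        return 0;
}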
/linux/arch/riscv/kvm/
tlb.c
24 gpa_t gpa, gpa_t gpsz, in kvm_riscv_local_hfence_gvma_vmid_gpa() argument
36 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
41 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_vmid_gpa()
52 void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, in kvm_riscv_local_hfence_gvma_gpa() argument
64 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
69 for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) in kvm_riscv_local_hfence_gvma_gpa()
343 gpa_t gpa, gpa_t gpsz, in kvm_riscv_hfence_gvma_vmid_gpa() argument
351 data.addr = gpa; in kvm_riscv_hfence_gvma_vmid_gpa()
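The RISC-V flush excerpts above walk a guest-physical range in naturally sized strides, issuing one fence per block. A sketch of that loop shape, with emit_hfence() as a hypothetical stand-in for the HFENCE.GVMA instruction:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
#define BIT(n) (1ULL << (n))

static void emit_hfence(gpa_t pos)
{
        printf("hfence.gvma at 0x%llx\n", (unsigned long long)pos);
}

/* Fence [gpa, gpa + gpsz) one 2^order-sized block at a time. */
static void hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, unsigned long order)
{
        gpa_t pos;

        for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                emit_hfence(pos);
}

int main(void)
{
        hfence_gvma_gpa(0x10000, 0x4000, 12);  /* four 4 KiB blocks */
        return 0;
}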
/linux/arch/x86/kvm/vmx/
common.h
77 static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa) in vt_is_tdx_private_gpa() argument
80 return !kvm_is_addr_direct(kvm, gpa); in vt_is_tdx_private_gpa()
83 static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa, in __vmx_handle_ept_violation() argument
105 if (vt_is_tdx_private_gpa(vcpu->kvm, gpa)) in __vmx_handle_ept_violation()
108 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); in __vmx_handle_ept_violation()
tdx.c
1183 u64 gpa = tdx->map_gpa_next; in __tdx_map_gpa() local
1198 tdx->vcpu.run->hypercall.args[0] = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(tdx->vcpu.kvm)); in __tdx_map_gpa()
1200 tdx->vcpu.run->hypercall.args[2] = vt_is_tdx_private_gpa(tdx->vcpu.kvm, gpa) ? in __tdx_map_gpa()
1211 u64 gpa = tdx->vp_enter_args.r12; in tdx_map_gpa() local
1228 if (gpa + size <= gpa || !kvm_vcpu_is_legal_gpa(vcpu, gpa) || in tdx_map_gpa()
1229 !kvm_vcpu_is_legal_gpa(vcpu, gpa + size - 1) || in tdx_map_gpa()
1230 (vt_is_tdx_private_gpa(vcpu->kvm, gpa) != in tdx_map_gpa()
1231 vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1))) { in tdx_map_gpa()
1236 if (!PAGE_ALIGNED(gpa) || !PAGE_ALIGNED(size)) { in tdx_map_gpa()
1241 tdx->map_gpa_end = gpa + size; in tdx_map_gpa()
[all …]
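tdx_map_gpa() above validates the requested range with `gpa + size <= gpa`, which rejects both a zero size and 64-bit wraparound in a single comparison, before checking page alignment. A standalone sketch of that validation (the real code also checks GPA legality and that both ends fall on the same private/shared side):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

static bool map_range_is_valid(uint64_t gpa, uint64_t size)
{
        if (gpa + size <= gpa)          /* size == 0, or gpa + size wrapped */
                return false;
        if (!PAGE_ALIGNED(gpa) || !PAGE_ALIGNED(size))
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", map_range_is_valid(0x1000, 0x2000));             /* 1 */
        printf("%d\n", map_range_is_valid(UINT64_MAX - 0xfff, 0x2000)); /* 0: wraps */
        return 0;
}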
/linux/include/trace/events/
kvm.h
134 TP_PROTO(int type, int len, u64 gpa, void *val),
135 TP_ARGS(type, len, gpa, val),
140 __field( u64, gpa )
147 __entry->gpa = gpa;
156 __entry->len, __entry->gpa, __entry->val)
169 TP_PROTO(int type, int len, u64 gpa, void *val),
170 TP_ARGS(type, len, gpa, val),
175 __field( u64, gpa )
182 __entry->gpa = gpa;
191 __entry->len, __entry->gpa, __entry->val)
/linux/arch/riscv/include/asm/
kvm_mmu.h
11 int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
13 void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size);
15 gpa_t gpa, unsigned long hva, bool is_write,
kvm_tlb.h
35 gpa_t gpa, gpa_t gpsz,
38 void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
63 gpa_t gpa, gpa_t gpsz,
/linux/arch/x86/include/asm/
tdx.h
52 u64 gpa; member
173 u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_e…
174 u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *…
176 u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *…
177 u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
182 u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
193 u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
/linux/tools/testing/selftests/kvm/include/x86/
sev.h
123 static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, in sev_launch_update_data() argument
127 .uaddr = (unsigned long)addr_gpa2hva(vm, gpa), in sev_launch_update_data()
134 static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, in snp_launch_update_data() argument
139 .gfn_start = gpa >> PAGE_SHIFT, in snp_launch_update_data()
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
934 uint64_t gpa, uint64_t size, void *hva) in __vm_set_user_memory_region() argument
939 .guest_phys_addr = gpa, in __vm_set_user_memory_region()
948 uint64_t gpa, uint64_t size, void *hva) in vm_set_user_memory_region() argument
950 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); in vm_set_user_memory_region()
961 uint64_t gpa, uint64_t size, void *hva, in __vm_set_user_memory_region2() argument
967 .guest_phys_addr = gpa, in __vm_set_user_memory_region2()
980 uint64_t gpa, uint64_t size, void *hva, in vm_set_user_memory_region2() argument
983 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, in vm_set_user_memory_region2()
1301 uint64_t gpa, len; in vm_guest_mem_fallocate() local
1305 for (gpa = base; gpa < end; gpa += len) { in vm_guest_mem_fallocate()
[all …]
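__vm_set_user_memory_region() above is a thin wrapper over the KVM_SET_USER_MEMORY_REGION ioctl, which installs a memslot mapping a gpa range to host memory. A minimal sketch of the raw API, assuming a Linux host with /dev/kvm and doing only token error handling:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int set_user_memory_region(int vm_fd, uint32_t slot, uint32_t flags,
                                  uint64_t gpa, uint64_t size, void *hva)
{
        struct kvm_userspace_memory_region region = {
                .slot = slot,
                .flags = flags,
                .guest_phys_addr = gpa,           /* where the guest sees it */
                .memory_size = size,              /* bytes; 0 deletes the slot */
                .userspace_addr = (uintptr_t)hva, /* host backing memory */
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        void *mem = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (set_user_memory_region(vm, 0, 0, 0x100000, 0x10000, mem))
                perror("KVM_SET_USER_MEMORY_REGION");
        else
                puts("memslot installed");
        return 0;
}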
