Lines matching "memory-region"
1 // SPDX-License-Identifier: GPL-2.0-only
44 * flags - The flags to pass when opening KVM_DEV_PATH.
86 * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the in get_module_param_integer()
96 TEST_ASSERT(value[r - 1] == '\n', in get_module_param_integer()
97 "Expected trailing newline, got char '%c'", value[r - 1]); in get_module_param_integer()
101 * trailing non-NUL characters in the string. in get_module_param_integer()
103 value[r - 1] = '\0'; in get_module_param_integer()
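The fragments above are from get_module_param_integer(), which reads a module parameter out of sysfs and strips the trailing newline the kernel appends before NUL-terminating the value. A minimal standalone sketch of the same pattern, with a hypothetical helper name rather than the selftest's own code:

#define _GNU_SOURCE
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Read an integer module parameter, e.g. /sys/module/kvm/parameters/halt_poll_ns. */
static long read_module_param(const char *module, const char *param)
{
	char path[PATH_MAX], value[32];
	ssize_t r;
	int fd;

	snprintf(path, sizeof(path), "/sys/module/%s/parameters/%s", module, param);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	r = read(fd, value, sizeof(value) - 1);
	close(fd);
	if (r <= 0)
		return -1;

	/* The kernel appends a newline; replace it with a NUL terminator. */
	if (value[r - 1] == '\n')
		value[r - 1] = '\0';
	else
		value[r] = '\0';

	return strtol(value, NULL, 0);
}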
157 * cap - Capability
189 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
194 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
198 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
199 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
202 vm->stats.fd = vm_get_stats_fd(vm); in vm_open()
204 vm->stats.fd = -1; in vm_open()
210 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", in vm_guest_mode_string()
211 [VM_MODE_P52V48_16K] = "PA-bits:52, VA-bits:48, 16K pages", in vm_guest_mode_string()
212 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", in vm_guest_mode_string()
213 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", in vm_guest_mode_string()
214 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", in vm_guest_mode_string()
215 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", in vm_guest_mode_string()
216 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", in vm_guest_mode_string()
217 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", in vm_guest_mode_string()
218 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", in vm_guest_mode_string()
219 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", in vm_guest_mode_string()
220 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", in vm_guest_mode_string()
221 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", in vm_guest_mode_string()
222 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", in vm_guest_mode_string()
223 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", in vm_guest_mode_string()
224 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", in vm_guest_mode_string()
225 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", in vm_guest_mode_string()
257 * Initializes vm->vpages_valid to match the canonical VA space of the
261 * range addressed by a single page table into a low and high region
263  * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
267 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
268 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
269 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
270 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_vaddr_populate_bitmap()
271 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
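The comment above describes splitting the canonical virtual address space into a low half [0, 2^(va_bits - 1)) and a high half [-(2^(va_bits - 1)), -1], which is exactly what the two sparsebit_set_num() calls encode in page units. A small illustrative sketch of that arithmetic for the common 48-bit/4K case (standalone, not part of the library):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int va_bits = 48, page_shift = 12;
	uint64_t half = 1ULL << (va_bits - 1);

	/* Low half: [0, 2^(va_bits - 1)), expressed in pages. */
	uint64_t low_start_pg = 0;
	uint64_t nr_pages     = half >> page_shift;

	/* High half: sign-extended addresses [-(2^(va_bits - 1)), -1]. */
	uint64_t high_start    = ~(half - 1);	/* 0xffff800000000000 for va_bits = 48 */
	uint64_t high_start_pg = high_start >> page_shift;

	printf("low:  pages [%#" PRIx64 ", +%#" PRIx64 ")\n", low_start_pg, nr_pages);
	printf("high: pages [%#" PRIx64 ", +%#" PRIx64 ")\n", high_start_pg, nr_pages);
	return 0;
}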
279 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
281 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
282 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
283 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
284 hash_init(vm->regions.slot_hash); in ____vm_create()
286 vm->mode = shape.mode; in ____vm_create()
287 vm->type = shape.type; in ____vm_create()
289 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; in ____vm_create()
290 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; in ____vm_create()
291 vm->page_size = vm_guest_mode_params[vm->mode].page_size; in ____vm_create()
292 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift; in ____vm_create()
295 switch (vm->mode) { in ____vm_create()
297 vm->pgtable_levels = 4; in ____vm_create()
300 vm->pgtable_levels = 3; in ____vm_create()
303 vm->pgtable_levels = 4; in ____vm_create()
306 vm->pgtable_levels = 3; in ____vm_create()
310 vm->pgtable_levels = 4; in ____vm_create()
314 vm->pgtable_levels = 3; in ____vm_create()
320 vm->pgtable_levels = 4; in ____vm_create()
323 vm->pgtable_levels = 3; in ____vm_create()
327 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
330 * Ignore KVM support for 5-level paging (vm->va_bits == 57), in ____vm_create()
332 * isn't for this mode (48-bit virtual address space). in ____vm_create()
334 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
336 vm->va_bits); in ____vm_create()
338 vm->pa_bits); in ____vm_create()
339 vm->pgtable_levels = 4; in ____vm_create()
340 vm->va_bits = 48; in ____vm_create()
342 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); in ____vm_create()
346 vm->pgtable_levels = 5; in ____vm_create()
349 vm->pgtable_levels = 5; in ____vm_create()
352 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode); in ____vm_create()
356 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types"); in ____vm_create()
357 if (vm->pa_bits != 40) in ____vm_create()
358 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
363 /* Limit to VA-bit canonical virtual addresses. */ in ____vm_create()
364 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
367 /* Limit physical addresses to PA-bits. */ in ____vm_create()
368 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
370 /* Allocate and setup memory for guest. */ in ____vm_create()
371 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
387 "nr_vcpus = %d too large for host, max-vcpus = %d", in vm_nr_pages_required()
392 * test code and other per-VM assets that will be loaded into memslot0. in vm_nr_pages_required()
396 /* Account for the per-vCPU stacks on behalf of the test. */ in vm_nr_pages_required()
401 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
465 vm->memslots[i] = 0; in __vm_create()
472 * read-only memslots as MMIO, and creating a read-only memslot for the in __vm_create()
473 * MMIO region would prevent silently clobbering the MMIO region. in __vm_create()
476 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
494 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
495 * nr_vcpus - VCPU count
496 * extra_mem_pages - Non-slot0 physical memory total size
497 * guest_code - Guest entry point
498 * vcpuids - VCPU IDs
507 * no real memory allocation for non-slot0 memory in this function.
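The parameter list above belongs to the convenience VM-creation helper. For reference, most tests go through the one-vCPU wrapper; a hedged usage sketch, assuming the usual selftests headers (kvm_util.h, ucall_common.h, processor.h) and a trivial guest function:

#include "kvm_util.h"
#include "ucall_common.h"
#include "processor.h"

static void guest_code(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Creates the VM, loads guest_code into memslot 0, and adds one vCPU. */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_run(vcpu);

	kvm_vm_free(vm);
	return 0;
}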
544 * vm - VM that has been released before
549 * global state, such as the irqchip and the memory regions that are mapped
555 struct userspace_mem_region *region; in kvm_vm_restart() local
558 if (vmp->has_irqchip) in kvm_vm_restart()
561 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
562 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, ®ion->region); in kvm_vm_restart()
568 ret, errno, region->region.slot, in kvm_vm_restart()
569 region->region.flags, in kvm_vm_restart()
570 region->region.guest_phys_addr, in kvm_vm_restart()
571 region->region.memory_size); in kvm_vm_restart()
612 printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n" in kvm_print_vcpu_pinning_help()
619 " %s -v 3 -c 22,23,24,50\n\n" in kvm_print_vcpu_pinning_help()
621 " %s -v 3 -c 22,23,24\n\n" in kvm_print_vcpu_pinning_help()
659 * Userspace Memory Region Find
662 * vm - Virtual Machine
663 * start - Starting VM physical address
664 * end - Ending VM physical address, inclusive.
669 * Pointer to overlapping region, NULL if no such region.
671 * Searches for a region with any physical memory that overlaps with
675 * region exists.
682 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
683 struct userspace_mem_region *region = in userspace_mem_region_find() local
685 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
686 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
687 + region->region.memory_size - 1; in userspace_mem_region_find()
689 return region; in userspace_mem_region_find()
692 node = node->rb_left; in userspace_mem_region_find()
694 node = node->rb_right; in userspace_mem_region_find()
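userspace_mem_region_find() walks the GPA rb-tree comparing the requested [start, end] range against each region's extent. The overlap test it relies on reduces to a simple inclusive-interval check; a standalone sketch of that predicate (illustrative, not the tree walk itself):

#include <stdbool.h>
#include <stdint.h>

/*
 * Two inclusive GPA ranges [a_start, a_end] and [b_start, b_end] overlap
 * iff each range starts at or before the other range ends.
 */
static bool gpa_ranges_overlap(uint64_t a_start, uint64_t a_end,
			       uint64_t b_start, uint64_t b_end)
{
	return a_start <= b_end && b_start <= a_end;
}

vm_mem_add() later uses the same lookup to assert that a newly added region does not overlap an existing one.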
704 if (stats->fd < 0) in kvm_stats_release()
707 if (stats->desc) { in kvm_stats_release()
708 free(stats->desc); in kvm_stats_release()
709 stats->desc = NULL; in kvm_stats_release()
712 ret = close(stats->fd); in kvm_stats_release()
714 stats->fd = -1; in kvm_stats_release()
726 * vcpu - VCPU to remove
738 if (vcpu->dirty_gfns) { in vm_vcpu_rm()
739 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
741 vcpu->dirty_gfns = NULL; in vm_vcpu_rm()
744 ret = munmap(vcpu->run, vcpu_mmap_sz()); in vm_vcpu_rm()
747 ret = close(vcpu->fd); in vm_vcpu_rm()
750 kvm_stats_release(&vcpu->stats); in vm_vcpu_rm()
752 list_del(&vcpu->list); in vm_vcpu_rm()
763 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) in kvm_vm_release()
766 ret = close(vmp->fd); in kvm_vm_release()
769 ret = close(vmp->kvm_fd); in kvm_vm_release()
773 kvm_stats_release(&vmp->stats); in kvm_vm_release()
777 struct userspace_mem_region *region) in __vm_mem_region_delete() argument
781 rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
782 rb_erase(®ion->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
783 hash_del(®ion->slot_node); in __vm_mem_region_delete()
785 sparsebit_free(®ion->unused_phy_pages); in __vm_mem_region_delete()
786 sparsebit_free(®ion->protected_phy_pages); in __vm_mem_region_delete()
787 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
789 if (region->fd >= 0) { in __vm_mem_region_delete()
790 /* There's an extra map when using shared memory. */ in __vm_mem_region_delete()
791 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
793 close(region->fd); in __vm_mem_region_delete()
795 if (region->region.guest_memfd >= 0) in __vm_mem_region_delete()
796 close(region->region.guest_memfd); in __vm_mem_region_delete()
798 free(region); in __vm_mem_region_delete()
808 struct userspace_mem_region *region; in kvm_vm_free() local
814 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
815 __vm_mem_region_delete(vmp, region); in kvm_vm_free()
818 sparsebit_free(&vmp->vpages_valid); in kvm_vm_free()
819 sparsebit_free(&vmp->vpages_mapped); in kvm_vm_free()
836 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd)); in kvm_memfd_alloc()
848 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
852 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_gpa_insert()
857 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
858 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
859 cur = &(*cur)->rb_left; in vm_userspace_mem_region_gpa_insert()
861 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
862 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
863 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
865 cur = &(*cur)->rb_right; in vm_userspace_mem_region_gpa_insert()
869 rb_link_node(®ion->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
870 rb_insert_color(®ion->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
874 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
878 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_hva_insert()
883 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
884 cur = &(*cur)->rb_left; in vm_userspace_mem_region_hva_insert()
886 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
887 cregion->host_mem, in vm_userspace_mem_region_hva_insert()
888 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
890 cur = &(*cur)->rb_right; in vm_userspace_mem_region_hva_insert()
894 rb_link_node(®ion->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
895 rb_insert_color(®ion->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
902 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
910 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); in __vm_set_user_memory_region()
930 struct kvm_userspace_memory_region2 region = { in __vm_set_user_memory_region2() local
942 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, ®ion); in __vm_set_user_memory_region2()
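__vm_set_user_memory_region2() fills a struct kvm_userspace_memory_region2 and hands it to the KVM_SET_USER_MEMORY_REGION2 ioctl; the initializer itself is elided from the listing above. A hedged sketch of the fields involved, per the KVM API (requires a uapi linux/kvm.h new enough to define the region2 ioctl; names and values are illustrative):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* vm_fd: fd returned by KVM_CREATE_VM; addr: host mapping backing the slot. */
static int set_memslot2(int vm_fd, uint32_t slot, uint32_t flags,
			uint64_t gpa, uint64_t size, void *addr,
			int guest_memfd, uint64_t guest_memfd_offset)
{
	struct kvm_userspace_memory_region2 region = {
		.slot			= slot,
		.flags			= flags,	/* e.g. KVM_MEM_GUEST_MEMFD */
		.guest_phys_addr	= gpa,
		.memory_size		= size,
		.userspace_addr		= (uintptr_t)addr,
		.guest_memfd		= guest_memfd,
		.guest_memfd_offset	= guest_memfd_offset,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}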
963 struct userspace_mem_region *region; in vm_mem_add() local
965 size_t mem_size = npages * vm->page_size; in vm_mem_add()
970 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_mem_add()
972 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_mem_add()
974 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_mem_add()
976 " guest_paddr: 0x%lx vm->page_size: 0x%x", in vm_mem_add()
977 guest_paddr, vm->page_size); in vm_mem_add()
978 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_mem_add()
979 <= vm->max_gfn, "Physical range beyond maximum " in vm_mem_add()
982 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", in vm_mem_add()
983 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_mem_add()
986 * Confirm a mem region with an overlapping address doesn't in vm_mem_add()
989 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_mem_add()
990 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_mem_add()
991 if (region != NULL) in vm_mem_add()
997 guest_paddr, npages, vm->page_size, in vm_mem_add()
998 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
999 (uint64_t) region->region.memory_size); in vm_mem_add()
1001 /* Confirm no region with the requested slot already exists. */ in vm_mem_add()
1002 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
1004 if (region->region.slot != slot) in vm_mem_add()
1007 TEST_FAIL("A mem region with the requested slot " in vm_mem_add()
1012 region->region.slot, in vm_mem_add()
1013 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
1014 (uint64_t) region->region.memory_size); in vm_mem_add()
1017 /* Allocate and initialize new mem region structure. */ in vm_mem_add()
1018 region = calloc(1, sizeof(*region)); in vm_mem_add()
1019 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_mem_add()
1020 region->mmap_size = mem_size; in vm_mem_add()
1040 /* Add enough memory to align up if necessary */ in vm_mem_add()
1042 region->mmap_size += alignment; in vm_mem_add()
1044 region->fd = -1; in vm_mem_add()
1046 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_mem_add()
1049 region->mmap_start = mmap(NULL, region->mmap_size, in vm_mem_add()
1051 vm_mem_backing_src_alias(src_type)->flag, in vm_mem_add()
1052 region->fd, 0); in vm_mem_add()
1053 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_mem_add()
1057 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_mem_add()
1059 region->mmap_start, backing_src_pagesz); in vm_mem_add()
1062 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_mem_add()
1067 ret = madvise(region->host_mem, mem_size, in vm_mem_add()
1070 region->host_mem, mem_size, in vm_mem_add()
1071 vm_mem_backing_src_alias(src_type)->name); in vm_mem_add()
1074 region->backing_src_type = src_type; in vm_mem_add()
1085 * can be closed when the region is deleted without in vm_mem_add()
1093 region->region.guest_memfd = guest_memfd; in vm_mem_add()
1094 region->region.guest_memfd_offset = guest_memfd_offset; in vm_mem_add()
1096 region->region.guest_memfd = -1; in vm_mem_add()
1099 region->unused_phy_pages = sparsebit_alloc(); in vm_mem_add()
1101 region->protected_phy_pages = sparsebit_alloc(); in vm_mem_add()
1102 sparsebit_set_num(region->unused_phy_pages, in vm_mem_add()
1103 guest_paddr >> vm->page_shift, npages); in vm_mem_add()
1104 region->region.slot = slot; in vm_mem_add()
1105 region->region.flags = flags; in vm_mem_add()
1106 region->region.guest_phys_addr = guest_paddr; in vm_mem_add()
1107 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1108 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_mem_add()
1109 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); in vm_mem_add()
1115 guest_paddr, (uint64_t) region->region.memory_size, in vm_mem_add()
1116 region->region.guest_memfd); in vm_mem_add()
1119 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1120 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1121 hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); in vm_mem_add()
1123 /* If shared memory, create an alias. */ in vm_mem_add()
1124 if (region->fd >= 0) { in vm_mem_add()
1125 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_mem_add()
1127 vm_mem_backing_src_alias(src_type)->flag, in vm_mem_add()
1128 region->fd, 0); in vm_mem_add()
1129 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_mem_add()
1133 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_mem_add()
1142 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0); in vm_userspace_mem_region_add()
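vm_userspace_mem_region_add() is the wrapper tests normally use to add an extra memslot backed by anonymous memory (no guest_memfd, offset 0). An illustrative usage sketch; the slot number, GPA, and helper name are made up for the example, and vm is assumed to come from one of the vm_create*() helpers:

/* Add a hypothetical 2M (512 x 4K pages) anonymous slot and map it 1:1. */
static void add_test_slot(struct kvm_vm *vm)
{
	const uint32_t slot = 10;			/* hypothetical slot number */
	const uint64_t gpa = 0x100000000ULL;		/* hypothetical guest-physical base */
	const uint64_t npages = 512;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot, npages, 0);

	/* Map the range into the guest's page tables before the guest touches it. */
	virt_map(vm, gpa, gpa, npages);
}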
1146 * Memslot to region
1149 * vm - Virtual Machine
1150 * memslot - KVM memory slot ID
1155  * Pointer to the memory region structure that describes the memory region
1156  * using the KVM memory slot ID given by memslot. TEST_ASSERT failure
1157 * on error (e.g. currently no memory region using memslot as a KVM
1158 * memory slot ID).
1163 struct userspace_mem_region *region; in memslot2region() local
1165 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1167 if (region->region.slot == memslot) in memslot2region()
1168 return region; in memslot2region()
1170 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
1172 fputs("---- vm dump ----\n", stderr); in memslot2region()
1174 TEST_FAIL("Mem region not found"); in memslot2region()
1179 * VM Memory Region Flags Set
1182 * vm - Virtual Machine
1183  * flags - Flags to set for the memory region
1189 * Sets the flags of the memory region specified by the value of slot,
1195 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1197 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1199 region->region.flags = flags; in vm_mem_region_set_flags()
1201 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); in vm_mem_region_set_flags()
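A common use of vm_mem_region_set_flags() in the dirty-logging tests is toggling KVM_MEM_LOG_DIRTY_PAGES on an existing slot. A short illustrative snippet (slot number, bitmap, and helper name are hypothetical for the example):

/* Toggle dirty logging on a slot around one test iteration. */
static void with_dirty_logging(struct kvm_vm *vm, uint32_t slot, void *bitmap)
{
	vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);

	/* ... run vCPUs here so the guest dirties memory in this slot ... */

	kvm_vm_get_dirty_log(vm, slot, bitmap);
	vm_mem_region_set_flags(vm, slot, 0);
}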
1209 * VM Memory Region Move
1212 * vm - Virtual Machine
1213 * slot - Slot of the memory region to move
1214 * new_gpa - Starting guest physical address
1220 * Change the gpa of a memory region.
1224 struct userspace_mem_region *region; in vm_mem_region_move() local
1227 region = memslot2region(vm, slot); in vm_mem_region_move()
1229 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1231 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); in vm_mem_region_move()
1239 * VM Memory Region Delete
1242 * vm - Virtual Machine
1243 * slot - Slot of the memory region to delete
1249 * Delete a memory region.
1253 struct userspace_mem_region *region = memslot2region(vm, slot); in vm_mem_region_delete() local
1255 region->region.memory_size = 0; in vm_mem_region_delete()
1256 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, ®ion->region); in vm_mem_region_delete()
1258 __vm_mem_region_delete(vm, region); in vm_mem_region_delete()
1265 struct userspace_mem_region *region; in vm_guest_mem_fallocate() local
1274 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1275 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, in vm_guest_mem_fallocate()
1276 "Private memory region not found for GPA 0x%lx", gpa); in vm_guest_mem_fallocate()
1278 offset = gpa - region->region.guest_phys_addr; in vm_guest_mem_fallocate()
1279 fd_offset = region->region.guest_memfd_offset + offset; in vm_guest_mem_fallocate()
1280 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); in vm_guest_mem_fallocate()
1282 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); in vm_guest_mem_fallocate()
1285 region->region.guest_memfd, mode, fd_offset); in vm_guest_mem_fallocate()
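vm_guest_mem_fallocate() converts a GPA range into an offset within the region's guest_memfd and calls fallocate() on it: punching a hole deallocates the private memory, plain fallocate preallocates it. A reduced sketch of that core step (helper name is illustrative; the offset/length computation from GPA is shown in the lines above):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdbool.h>
#include <stdint.h>

/* Punch (deallocate) or preallocate 'len' bytes at 'fd_offset' of a guest_memfd. */
static int guest_memfd_fallocate(int guest_memfd, uint64_t fd_offset,
				 uint64_t len, bool punch_hole)
{
	int mode = punch_hole ? FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE : 0;

	return fallocate(guest_memfd, mode, fd_offset, len);
}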
1309 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1310 if (vcpu->id == vcpu_id) in vcpu_exists()
1330 TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); in __vm_vcpu_add()
1332 vcpu->vm = vm; in __vm_vcpu_add()
1333 vcpu->id = vcpu_id; in __vm_vcpu_add()
1334 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1335 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); in __vm_vcpu_add()
1337 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " in __vm_vcpu_add()
1339 vcpu_mmap_sz(), sizeof(*vcpu->run)); in __vm_vcpu_add()
1340 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), in __vm_vcpu_add()
1341 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); in __vm_vcpu_add()
1342 TEST_ASSERT(vcpu->run != MAP_FAILED, in __vm_vcpu_add()
1346 vcpu->stats.fd = vcpu_get_stats_fd(vcpu); in __vm_vcpu_add()
1348 vcpu->stats.fd = -1; in __vm_vcpu_add()
1350 /* Add to linked-list of VCPUs. */ in __vm_vcpu_add()
1351 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
1360 * vm - Virtual Machine
1361 * sz - Size (bytes)
1362 * vaddr_min - Minimum Virtual Address
1379 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1382 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1383 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1387 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1389 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1398 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1401 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1410 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1413 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1423 return -1; in vm_vaddr_unused_gap()
1426 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1432 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1439 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1447 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in ____vm_vaddr_alloc()
1451 KVM_UTIL_MIN_PFN * vm->page_size, in ____vm_vaddr_alloc()
1452 vm->memslots[type], protected); in ____vm_vaddr_alloc()
1462 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in ____vm_vaddr_alloc()
1466 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in ____vm_vaddr_alloc()
1490 * vm - Virtual Machine
1491 * sz - Size in bytes
1492 * vaddr_min - Minimum starting virtual address
1503 * a page. The allocated physical space comes from the TEST_DATA memory region.
1514 * vm - Virtual Machine
1538 * vm - Virtual Machine
1557 * vm - Virtual Machine
1558  * vaddr - Virtual address to map
1559 * paddr - VM Physical Address
1560 * npages - The number of pages to map
1572 size_t page_size = vm->page_size; in virt_map()
1578 while (npages--) { in virt_map()
1580 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in virt_map()
1591 * vm - Virtual Machine
1592 * gpa - VM physical address
1599 * Locates the memory region containing the VM physical address given
1601 * address providing the memory to the vm physical address is returned.
1602 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1606 struct userspace_mem_region *region; in addr_gpa2hva() local
1610 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1611 if (!region) { in addr_gpa2hva()
1612 TEST_FAIL("No vm physical memory at 0x%lx", gpa); in addr_gpa2hva()
1616 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1617 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
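addr_gpa2hva() is how host-side test code touches guest memory directly, e.g. to initialize or verify a buffer the guest will use. An illustrative snippet built on the selftests allocation helpers (the wrapper name is made up for the example):

/* Allocate a guest page and zero it from the host side. */
static void *host_zero_guest_page(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
	uint8_t *hva = addr_gpa2hva(vm, addr_gva2gpa(vm, gva));

	memset(hva, 0, vm->page_size);
	return hva;
}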
1624 * vm - Virtual Machine
1625 * hva - Host virtual address
1632 * Locates the memory region containing the host virtual address given
1635 * region containing hva exists.
1641 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1642 struct userspace_mem_region *region = in addr_hva2gpa() local
1645 if (hva >= region->host_mem) { in addr_hva2gpa()
1646 if (hva <= (region->host_mem in addr_hva2gpa()
1647 + region->region.memory_size - 1)) in addr_hva2gpa()
1649 region->region.guest_phys_addr in addr_hva2gpa()
1650 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
1652 node = node->rb_right; in addr_hva2gpa()
1654 node = node->rb_left; in addr_hva2gpa()
1658 return -1; in addr_hva2gpa()
1665 * vm - Virtual Machine
1666 * gpa - VM physical address
1672 * (without failing the test) if the guest memory is not shared (so
1677 * memory without mapping said memory in the guest's address space. And, for
1678 * userfaultfd-based demand paging, to do so without triggering userfaults.
1682 struct userspace_mem_region *region; in addr_gpa2alias() local
1685 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1686 if (!region) in addr_gpa2alias()
1689 if (!region->host_alias) in addr_gpa2alias()
1692 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1693 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
1701 vm->has_irqchip = true; in vm_create_irqchip()
1710 } while (rc == -1 && errno == EINTR); in _vcpu_run()
1719 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
1720  * Assert if KVM returns an error (other than -EINTR).
1733 vcpu->run->immediate_exit = 1; in vcpu_run_complete_io()
1735 vcpu->run->immediate_exit = 0; in vcpu_run_complete_io()
1737 TEST_ASSERT(ret == -1 && errno == EINTR, in vcpu_run_complete_io()
1753 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); in vcpu_get_reg_list()
1756 reg_list->n = reg_list_n.n; in vcpu_get_reg_list()
1764 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
1768 if (!vcpu->dirty_gfns) { in vcpu_map_dirty_ring()
1771 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1775 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1779 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, in vcpu_map_dirty_ring()
1783 vcpu->dirty_gfns = addr; in vcpu_map_dirty_ring()
1784 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); in vcpu_map_dirty_ring()
1787 return vcpu->dirty_gfns; in vcpu_map_dirty_ring()
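vcpu_map_dirty_ring() first verifies that the kernel rejects the wrong mmap protections, then maps the ring read/write shared. Once mapped, each entry is a struct kvm_dirty_gfn that a harvester collects and marks for reset. A hedged sketch of the per-entry handling based on the KVM dirty-ring ABI (callback and helper names are hypothetical; the actual reclaim happens when userspace later issues KVM_RESET_DIRTY_RINGS on the VM fd):

#include <linux/kvm.h>
#include <stdint.h>

/* Harvest dirtied (slot, gfn) pairs from a mapped dirty ring of 'nents' entries. */
static uint32_t dirty_ring_collect(struct kvm_dirty_gfn *gfns, uint32_t nents,
				   uint32_t *fetch_index,
				   void (*mark_dirty)(uint32_t slot, uint64_t gfn))
{
	uint32_t collected = 0;

	for (;;) {
		struct kvm_dirty_gfn *e = &gfns[*fetch_index % nents];
		uint32_t flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);

		/* Stop at the first entry KVM has not published yet. */
		if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
			break;

		mark_dirty(e->slot, e->offset);

		/* Hand the entry back; KVM_RESET_DIRTY_RINGS reclaims it. */
		__atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET, __ATOMIC_RELEASE);
		(*fetch_index)++;
		collected++;
	}

	return collected;
}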
1819 .fd = -1, in __kvm_create_device()
1894 assert(routing->nr < KVM_MAX_IRQ_ROUTES); in kvm_gsi_routing_irqchip_add()
1896 i = routing->nr; in kvm_gsi_routing_irqchip_add()
1897 routing->entries[i].gsi = gsi; in kvm_gsi_routing_irqchip_add()
1898 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; in kvm_gsi_routing_irqchip_add()
1899 routing->entries[i].flags = 0; in kvm_gsi_routing_irqchip_add()
1900 routing->entries[i].u.irqchip.irqchip = 0; in kvm_gsi_routing_irqchip_add()
1901 routing->entries[i].u.irqchip.pin = pin; in kvm_gsi_routing_irqchip_add()
1902 routing->nr++; in kvm_gsi_routing_irqchip_add()
1928 * vm - Virtual Machine
1929 * indent - Left margin indent amount
1932 * stream - Output FILE stream
1942 struct userspace_mem_region *region; in vm_dump() local
1945 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1946 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1947 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1949 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1952 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1953 (uint64_t) region->region.memory_size, in vm_dump()
1954 region->host_mem); in vm_dump()
1956 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1957 if (region->protected_phy_pages) { in vm_dump()
1959 sparsebit_dump(stream, region->protected_phy_pages, 0); in vm_dump()
1963 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1965 vm->pgd_created); in vm_dump()
1966 if (vm->pgd_created) { in vm_dump()
1973 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
2030 * exit_reason - Exit reason
2057 * vm - Virtual Machine
2058  * num - Number of pages
2059 * paddr_min - Physical address minimum
2060 * memslot - Memory region to allocate page from
2061 * protected - True if the pages will be used as protected/private memory
2077 struct userspace_mem_region *region; in __vm_phy_pages_alloc() local
2082 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in __vm_phy_pages_alloc()
2085 paddr_min, vm->page_size); in __vm_phy_pages_alloc()
2087 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2088 TEST_ASSERT(!protected || region->protected_phy_pages, in __vm_phy_pages_alloc()
2089 "Region doesn't support protected memory"); in __vm_phy_pages_alloc()
2091 base = pg = paddr_min >> vm->page_shift; in __vm_phy_pages_alloc()
2094 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in __vm_phy_pages_alloc()
2095 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2104 paddr_min, vm->page_size, memslot); in __vm_phy_pages_alloc()
2105 fputs("---- vm dump ----\n", stderr); in __vm_phy_pages_alloc()
2111 sparsebit_clear(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2113 sparsebit_set(region->protected_phy_pages, pg); in __vm_phy_pages_alloc()
2116 return base * vm->page_size; in __vm_phy_pages_alloc()
2128 vm->memslots[MEM_REGION_PT]); in vm_alloc_page_table()
2135 * vm - Virtual Machine
2136 * gva - VM virtual address
2150 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
2158 unsigned int n = 1 << (new_page_shift - page_shift); in vm_calc_num_pages()
2161 return num_pages * (1 << (page_shift - new_page_shift)); in vm_calc_num_pages()
2168 return __builtin_ffs(getpagesize()) - 1; in getpageshift()
2197 * stats_fd - the file descriptor for the binary stats file from which to read
2198 * header - the binary stats metadata header corresponding to the given FD
2215 total_size = header->num_desc * desc_size; in read_stats_descriptors()
2217 stats_desc = calloc(header->num_desc, desc_size); in read_stats_descriptors()
2218 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors"); in read_stats_descriptors()
2220 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset); in read_stats_descriptors()
2230 * stats_fd - the file descriptor for the binary stats file from which to read
2231 * header - the binary stats metadata header corresponding to the given FD
2232 * desc - the binary stat metadata for the particular stat to be read
2233 * max_elements - the maximum number of 8-byte values to read into data
2236 * data - the buffer into which stat data should be read
2244 size_t nr_elements = min_t(ssize_t, desc->size, max_elements); in read_stat_data()
2248 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name); in read_stat_data()
2249 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name); in read_stat_data()
2252 header->data_offset + desc->offset); in read_stat_data()
2255 desc->name, errno, strerror(errno)); in read_stat_data()
2258 desc->name, size, ret); in read_stat_data()
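read_stats_descriptors() and read_stat_data() implement the KVM binary stats ABI: the fd returned by KVM_GET_STATS_FD starts with a kvm_stats_header, the descriptors live at desc_offset (each followed by its name_size-byte name), and the values live at data_offset plus the per-descriptor offset. A condensed sketch of reading one named stat straight from such an fd (error handling trimmed; assumes a recent uapi linux/kvm.h; helper name is illustrative):

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Returns the first 8-byte value of the stat 'name', or 0 if not found. */
static uint64_t read_one_stat(int stats_fd, const char *name)
{
	struct kvm_stats_header hdr;
	struct kvm_stats_desc *desc;
	size_t desc_sz;
	uint64_t val = 0;
	uint32_t i;

	pread(stats_fd, &hdr, sizeof(hdr), 0);

	/* Each descriptor is followed by its NUL-padded name (name_size bytes). */
	desc_sz = sizeof(*desc) + hdr.name_size;
	desc = calloc(hdr.num_desc, desc_sz);
	pread(stats_fd, desc, hdr.num_desc * desc_sz, hdr.desc_offset);

	for (i = 0; i < hdr.num_desc; i++) {
		struct kvm_stats_desc *d = (void *)desc + i * desc_sz;

		if (!strcmp(d->name, name)) {
			pread(stats_fd, &val, sizeof(val),
			      hdr.data_offset + d->offset);
			break;
		}
	}

	free(desc);
	return val;
}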
2268 if (!stats->desc) { in kvm_get_stat()
2269 read_stats_header(stats->fd, &stats->header); in kvm_get_stat()
2270 stats->desc = read_stats_descriptors(stats->fd, &stats->header); in kvm_get_stat()
2273 size_desc = get_stats_descriptor_size(&stats->header); in kvm_get_stat()
2275 for (i = 0; i < stats->header.num_desc; ++i) { in kvm_get_stat()
2276 desc = (void *)stats->desc + (i * size_desc); in kvm_get_stat()
2278 if (strcmp(desc->name, name)) in kvm_get_stat()
2281 read_stat_data(stats->fd, &stats->header, desc, data, max_elements); in kvm_get_stat()
2310 struct userspace_mem_region *region; in vm_is_gpa_protected() local
2315 region = userspace_mem_region_find(vm, paddr, paddr); in vm_is_gpa_protected()
2316 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); in vm_is_gpa_protected()
2318 pg = paddr >> vm->page_shift; in vm_is_gpa_protected()
2319 return sparsebit_is_set(region->protected_phy_pages, pg); in vm_is_gpa_protected()