Lines Matching full:region

202  * range addressed by a single page table into a low and high region
204 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
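
That split layout is easiest to see with concrete numbers. A minimal sketch of the two bounds, assuming va_bits = 48 purely for illustration (the real value is architecture- and configuration-dependent):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int va_bits = 48;		/* illustrative only */
	uint64_t half = 1ULL << (va_bits - 1);

	/* Low region:  [0, 2^(va_bits - 1)) */
	printf("low:  [0x%016llx, 0x%016llx)\n",
	       0ULL, (unsigned long long)half);
	/* High region: [-(2^(va_bits - 1)), -1], i.e. sign-extended VAs */
	printf("high: [0x%016llx, 0x%016llx]\n",
	       (unsigned long long)(0 - half), 0xffffffffffffffffULL);
	return 0;
}
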
341 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
379 * MMIO region would prevent silently clobbering the MMIO region. in __vm_create()
382 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
454 struct userspace_mem_region *region; in kvm_vm_restart() local
460 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
461 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region); in kvm_vm_restart()
467 ret, errno, region->region.slot, in kvm_vm_restart()
468 region->region.flags, in kvm_vm_restart()
469 region->region.guest_phys_addr, in kvm_vm_restart()
470 region->region.memory_size); in kvm_vm_restart()
558 * Userspace Memory Region Find
568 * Pointer to overlapping region, NULL if no such region.
570 * Searches for a region with any physical memory that overlaps with
574 * region exists.
582 struct userspace_mem_region *region = in userspace_mem_region_find() local
584 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
585 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
586 + region->region.memory_size - 1; in userspace_mem_region_find()
588 return region; in userspace_mem_region_find()
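
The search in userspace_mem_region_find() boils down to a standard closed-interval overlap test against the [start, end] query. A minimal sketch of the predicate, using the same inclusive bounds as the matches above:

#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t existing_start, uint64_t existing_end,
			   uint64_t start, uint64_t end)
{
	/* Two inclusive ranges overlap iff each begins before the other ends. */
	return existing_start <= end && start <= existing_end;
}
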
654 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
660 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
661 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
662 hash_del(&region->slot_node); in __vm_mem_region_delete()
665 region->region.memory_size = 0; in __vm_mem_region_delete()
666 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in __vm_mem_region_delete()
668 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
669 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
671 if (region->fd >= 0) { in __vm_mem_region_delete()
673 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
675 close(region->fd); in __vm_mem_region_delete()
677 if (region->region.guest_memfd >= 0) in __vm_mem_region_delete()
678 close(region->region.guest_memfd); in __vm_mem_region_delete()
680 free(region); in __vm_mem_region_delete()
690 struct userspace_mem_region *region; in kvm_vm_free() local
702 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
703 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
806 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
815 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
816 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
819 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
820 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
821 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
827 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
828 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
832 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
841 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
844 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
846 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
852 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
853 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
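
Lookups descend the same way these inserts do, comparing guest_phys_addr at each node. A hedged sketch of a GPA lookup against the gpa_tree, assuming the kernel rbtree helpers (rb_entry() and friends) that the selftests already use; the real userspace_mem_region_find() performs essentially this walk:

static struct userspace_mem_region *
gpa_tree_find(struct rb_root *gpa_tree, uint64_t gpa)
{
	struct rb_node *node = gpa_tree->rb_node;

	while (node) {
		struct userspace_mem_region *region =
			rb_entry(node, struct userspace_mem_region, gpa_node);
		uint64_t start = region->region.guest_phys_addr;
		uint64_t end = start + region->region.memory_size - 1;

		if (gpa < start)
			node = node->rb_left;
		else if (gpa > end)
			node = node->rb_right;
		else
			return region;
	}

	return NULL;
}
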
860 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
868 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
884 struct kvm_userspace_memory_region2 region = { in __vm_set_user_memory_region2() local
894 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); in __vm_set_user_memory_region2()
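
Both wrappers hand KVM a fully populated descriptor; deletion, as __vm_mem_region_delete() above shows, is the same ioctl with memory_size set to 0. A standalone sketch of the newer call, assuming struct kvm_userspace_memory_region2 from <linux/kvm.h> and following the selftests' convention of guest_memfd = -1 when no guest_memfd backs the slot:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_region2(int vm_fd, uint32_t slot, uint32_t flags,
		       uint64_t gpa, uint64_t size, void *hva)
{
	struct kvm_userspace_memory_region2 region = {
		.slot = slot,
		.flags = flags,
		.guest_phys_addr = gpa,
		.memory_size = size,		/* 0 deletes the slot */
		.userspace_addr = (uintptr_t)hva,
		.guest_memfd = -1,		/* no guest_memfd backing */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}
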
915 struct userspace_mem_region *region; in vm_mem_add() local
936 * Confirm a mem region with an overlapping address doesn't in vm_mem_add()
939 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_mem_add()
941 if (region != NULL) in vm_mem_add()
948 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
949 (uint64_t) region->region.memory_size); in vm_mem_add()
951 /* Confirm no region with the requested slot already exists. */ in vm_mem_add()
952 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
954 if (region->region.slot != slot) in vm_mem_add()
957 TEST_FAIL("A mem region with the requested slot " in vm_mem_add()
962 region->region.slot, in vm_mem_add()
963 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
964 (uint64_t) region->region.memory_size); in vm_mem_add()
967 /* Allocate and initialize new mem region structure. */ in vm_mem_add()
968 region = calloc(1, sizeof(*region)); in vm_mem_add()
969 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_mem_add()
970 region->mmap_size = mem_size; in vm_mem_add()
992 region->mmap_size += alignment; in vm_mem_add()
994 region->fd = -1; in vm_mem_add()
996 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_mem_add()
999 region->mmap_start = mmap(NULL, region->mmap_size, in vm_mem_add()
1002 region->fd, 0); in vm_mem_add()
1003 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_mem_add()
1007 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_mem_add()
1009 region->mmap_start, backing_src_pagesz); in vm_mem_add()
1012 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_mem_add()
1017 ret = madvise(region->host_mem, mem_size, in vm_mem_add()
1020 region->host_mem, mem_size, in vm_mem_add()
1024 region->backing_src_type = src_type; in vm_mem_add()
1035 * can be closed when the region is deleted without in vm_mem_add()
1043 region->region.guest_memfd = guest_memfd; in vm_mem_add()
1044 region->region.guest_memfd_offset = guest_memfd_offset; in vm_mem_add()
1046 region->region.guest_memfd = -1; in vm_mem_add()
1049 region->unused_phy_pages = sparsebit_alloc(); in vm_mem_add()
1050 sparsebit_set_num(region->unused_phy_pages, in vm_mem_add()
1052 region->region.slot = slot; in vm_mem_add()
1053 region->region.flags = flags; in vm_mem_add()
1054 region->region.guest_phys_addr = guest_paddr; in vm_mem_add()
1055 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1056 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_mem_add()
1057 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_add()
1063 guest_paddr, (uint64_t) region->region.memory_size, in vm_mem_add()
1064 region->region.guest_memfd); in vm_mem_add()
1067 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1068 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1069 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_mem_add()
1072 if (region->fd >= 0) { in vm_mem_add()
1073 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_mem_add()
1076 region->fd, 0); in vm_mem_add()
1077 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_mem_add()
1081 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_mem_add()
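
From a test's perspective, all of the bookkeeping in vm_mem_add() hides behind one call. A hedged usage sketch, assuming the vm_userspace_mem_region_add() wrapper built on vm_mem_add() and a barebones VM; the slot, GPA, and page count are arbitrary:

	struct kvm_vm *vm = vm_create_barebones();

	/* 512 anonymous pages at 4 GiB, in memslot 10 (all illustrative). */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0x100000000ul, 10, 512, 0);

	kvm_vm_free(vm);
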
1094 * Memslot to region
1103 * Pointer to memory region structure that describes the memory region in memslot2region()
1105 * on error (e.g. currently no memory region using memslot as a KVM
1111 struct userspace_mem_region *region; in memslot2region() local
1113 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1115 if (region->region.slot == memslot) in memslot2region()
1116 return region; in memslot2region()
1118 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
1122 TEST_FAIL("Mem region not found"); in memslot2region()
1127 * VM Memory Region Flags Set
1137 * Sets the flags of the memory region specified by the value of slot,
1143 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1145 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1147 region->region.flags = flags; in vm_mem_region_set_flags()
1149 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_set_flags()
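
vm_mem_region_set_flags() is how tests toggle per-slot attributes on a live VM. A minimal sketch, assuming KVM_MEM_LOG_DIRTY_PAGES from the UAPI and the illustrative slot 10 from earlier:

	/* Enable dirty logging on the slot... */
	vm_mem_region_set_flags(vm, 10, KVM_MEM_LOG_DIRTY_PAGES);

	/* ...run a workload that dirties guest memory... */

	/* ...then turn it back off. */
	vm_mem_region_set_flags(vm, 10, 0);
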
1157 * VM Memory Region Move
1161 * slot - Slot of the memory region to move
1168 * Change the gpa of a memory region.
1172 struct userspace_mem_region *region; in vm_mem_region_move() local
1175 region = memslot2region(vm, slot); in vm_mem_region_move()
1177 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1179 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_move()
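
Because the move only rewrites guest_phys_addr and re-issues the ioctl, a slot can be relocated while the guest runs, which is what the memslot-move selftests exercise. A hedged sketch, reusing the illustrative slot and GPA:

	/* Shift slot 10 up by 1 GiB while vCPUs may be running. */
	vm_mem_region_move(vm, 10, 0x100000000ul + (1ul << 30));
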
1187 * VM Memory Region Delete
1191 * slot - Slot of the memory region to delete
1197 * Delete a memory region.
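
Deletion goes through the same slot lookup and then the __vm_mem_region_delete() path shown earlier, which unmaps the host memory and informs KVM by setting memory_size to 0. A minimal usage sketch of the public wrapper:

	/* Remove the illustrative slot 10 and all its host-side state. */
	vm_mem_region_delete(vm, 10);
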
1208 struct userspace_mem_region *region; in vm_guest_mem_fallocate() local
1217 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1218 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, in vm_guest_mem_fallocate()
1219 "Private memory region not found for GPA 0x%lx", gpa); in vm_guest_mem_fallocate()
1221 offset = gpa - region->region.guest_phys_addr; in vm_guest_mem_fallocate()
1222 fd_offset = region->region.guest_memfd_offset + offset; in vm_guest_mem_fallocate()
1223 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); in vm_guest_mem_fallocate()
1225 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); in vm_guest_mem_fallocate()
1228 region->region.guest_memfd, mode, fd_offset); in vm_guest_mem_fallocate()
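
vm_guest_mem_fallocate() translates the GPA range into guest_memfd offsets region by region before calling fallocate(). A standalone sketch of the underlying hole punch, assuming an already-created guest_memfd; offset and length must be page-aligned:

#define _GNU_SOURCE
#include <stdint.h>
#include <fcntl.h>

static int punch_hole(int guest_memfd, uint64_t fd_offset, uint64_t len)
{
	/* Drop backing pages for the range without shrinking the file. */
	return fallocate(guest_memfd,
			 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 fd_offset, len);
}
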
1425 * a page. The allocated physical space comes from the TEST_DATA memory region.
1521 * Locates the memory region containing the VM physical address given
1524 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1528 struct userspace_mem_region *region; in addr_gpa2hva() local
1530 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1531 if (!region) { in addr_gpa2hva()
1536 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1537 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
1552 * Locates the memory region containing the host virtual address given
1555 * region containing hva exists.
1562 struct userspace_mem_region *region = in addr_hva2gpa() local
1565 if (hva >= region->host_mem) { in addr_hva2gpa()
1566 if (hva <= (region->host_mem in addr_hva2gpa()
1567 + region->region.memory_size - 1)) in addr_hva2gpa()
1569 region->region.guest_phys_addr in addr_hva2gpa()
1570 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
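
addr_gpa2hva() and addr_hva2gpa() are exact inverses over a region's host mapping, which makes a round trip a cheap sanity check. A hedged sketch using the illustrative GPA from earlier:

	void *hva = addr_gpa2hva(vm, 0x100000000ul);
	vm_paddr_t gpa = addr_hva2gpa(vm, hva);

	TEST_ASSERT(gpa == 0x100000000ul, "GPA -> HVA -> GPA round trip broke");
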
1602 struct userspace_mem_region *region; in addr_gpa2alias() local
1605 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1606 if (!region) in addr_gpa2alias()
1609 if (!region->host_alias) in addr_gpa2alias()
1612 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1613 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
1861 struct userspace_mem_region *region; in vm_dump() local
1868 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1871 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1872 (uint64_t) region->region.memory_size, in vm_dump()
1873 region->host_mem); in vm_dump()
1875 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1976 * memslot - Memory region to allocate page from
1991 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
2001 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
2006 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
2007 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
2023 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
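
vm_phy_pages_alloc() hands out pages by clearing bits in the owning region's unused_phy_pages sparsebit, so the same page can never be allocated twice. A minimal usage sketch, assuming the single-page wrapper vm_phy_page_alloc(); the 1 MiB floor and memslot 0 are illustrative:

	/* One free physical page at or above 1 MiB, taken from memslot 0. */
	vm_paddr_t paddr = vm_phy_page_alloc(vm, 0x100000, 0);
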