Lines matching the full identifier "vm" in the RISC-V KVM selftests processor library (tools/testing/selftests/kvm/lib/riscv/processor.c), grouped by function below:
In page_align() -- round v up to the next vm->page_size boundary; note that
an already-aligned v still gets bumped up by one full page:

    static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    {
        return (v + vm->page_size) & ~(vm->page_size - 1);
    }
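A quick standalone check of that arithmetic (the 0x1000 page size is an
assumed value for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as page_align(), with the page size as a parameter. */
    static uint64_t align_up(uint64_t v, uint64_t page_size)
    {
        return (v + page_size) & ~(page_size - 1);
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)align_up(0x1234, 0x1000)); /* 0x2000 */
        printf("0x%llx\n", (unsigned long long)align_up(0x1000, 0x1000)); /* 0x2000, not 0x1000 */
        return 0;
    }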
In pte_addr() -- recover the physical address encoded in a page-table entry:

    static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
    {
        ...
    }
In ptrs_per_pte() -- number of PTEs held by one page-table page:

    static uint64_t ptrs_per_pte(struct kvm_vm *vm)
    {
        ...
    }
In pte_index() -- index of the PTE slot covering gva at the given level:

    static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
    {
        ...
        TEST_ASSERT(level < vm->pgtable_levels, ...);
        ...
    }
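For context, RISC-V Sv39/Sv48 virtual addresses carry one 9-bit VPN field per
translation level above the 12-bit page offset, so an index function of this
shape typically reduces to the following sketch (an illustration of the
encoding, not the file's exact implementation):

    #include <stdint.h>

    /* 9 bits of virtual page number per level, above a 12-bit page offset. */
    static uint64_t vpn_index(uint64_t gva, int level)
    {
        return (gva >> (12 + 9 * level)) & 0x1ff;
    }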
In virt_arch_pgd_alloc() -- allocate the root page table, once per VM:

    void virt_arch_pgd_alloc(struct kvm_vm *vm)
    {
        size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;

        if (vm->pgd_created)
            return;

        vm->pgd = vm_phy_pages_alloc(vm, nr_pages, ...,
                                     vm->memslots[MEM_REGION_PT]);
        vm->pgd_created = true;
    }
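Assuming 4 KiB pages and 8-byte PTEs (so ptrs_per_pte() is 512), the sizing
works out to nr_pages = page_align(vm, 512 * 8) / 4096 =
((4096 + 4096) & ~4095) / 4096 = 8192 / 4096 = 2: the strict round-up in
page_align() makes the root-table allocation two pages rather than one.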
In virt_arch_pg_map() -- install a gva->gpa mapping, validating both
addresses and allocating intermediate page-table pages on demand while
walking down from the root level:

    void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
    {
        ...
        int level = vm->pgtable_levels - 1;

        TEST_ASSERT((vaddr % vm->page_size) == 0, ...
            " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                                     (vaddr >> vm->page_shift)), ...);
        TEST_ASSERT((paddr % vm->page_size) == 0, ...
            " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, ...
            " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
            paddr, vm->max_gfn, vm->page_size);

        /* Root level: locate the PTE slot, allocating a table if empty. */
        ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
        ...
        next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
        ...

        /* Lower levels: follow (or create) each next-level table. */
        ...
        ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
               pte_index(vm, vaddr, level) * 8;
        ...
        next_ppn = vm_alloc_page_table(vm) >>
                   PGTBL_PAGE_SIZE_SHIFT;
        ...
    }
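The next_ppn values computed above are written back as valid page-table
entries. On RISC-V the PPN occupies the PTE from bit 10 upward, with the low
bits as flags, so the write-back plausibly looks like this sketch (the shift
and flag values follow the privileged-spec Sv39/Sv48 PTE layout, not
constants taken from this file):

    #include <stdint.h>

    #define PTE_V         (1ULL << 0)   /* valid bit */
    #define PTE_PPN_SHIFT 10            /* PPN starts at bit 10 */

    /* A PTE with V set and R/W/X clear points to a next-level table. */
    static uint64_t make_table_pte(uint64_t ppn)
    {
        return (ppn << PTE_PPN_SHIFT) | PTE_V;
    }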
In addr_arch_gva2gpa() -- translate a guest virtual address by walking the
guest page tables through their host mapping; fails the test if the address
is unmapped:

    vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
    {
        ...
        int level = vm->pgtable_levels - 1;

        if (!vm->pgd_created)
            ...

        ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
        ...
        ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
               pte_index(vm, gva, level) * 8;
        ...
        return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
        ...
        TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
                  ...);
    }
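Taken together, the two functions give a host-side map/translate round trip.
A sketch of how a test might exercise them (the GVA/GPA values are made up,
and the GVA must be one the library considers valid per the sparsebit check
above):

    /* Map one page, then verify the software walk translates it back. */
    static void check_one_mapping(struct kvm_vm *vm)
    {
        uint64_t gva = 0x4000000, gpa = 0x100000;

        virt_arch_pg_map(vm, gva, gpa);
        TEST_ASSERT(addr_arch_gva2gpa(vm, gva + 0x10) == gpa + 0x10,
                    "gva->gpa round trip failed");
    }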
In pte_dump() -- recursively print one page-table subtree, descending a
level per recursive call:

    static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                         uint64_t page, int level)
    {
        ...
        for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
            ptep = addr_gpa2hva(vm, pte);
            ...
            pte_dump(stream, vm, indent + 1,
                     pte_addr(vm, *ptep), level - 1);
        }
        ...
    }
In virt_arch_dump() -- dump the guest page-table hierarchy, iterating over
the root entries and recursing into pte_dump():

    void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
    {
        int level = vm->pgtable_levels - 1;
        ...
        if (!vm->pgd_created)
            return;

        for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
            ptep = addr_gpa2hva(vm, pgd);
            ...
            pte_dump(stream, vm, indent + 1,
                     pte_addr(vm, *ptep), level - 1);
        }
    }
In riscv_vcpu_mmu_setup() -- program the vCPU's satp CSR with the root
table's physical page number plus the translation mode, after checking that
the VM mode is one the RISC-V code supports:

    static void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
    {
        struct kvm_vm *vm = vcpu->vm;
        ...
        switch (vm->mode) {
        ...
        default:
            TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }

        satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
        ...
    }
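On RV64, satp packs the mode into bits 63:60, an ASID into bits 59:44, and
the root table's physical page number into bits 43:0; the line above extracts
the PPN, and the mode bits are OR'd in separately. A minimal sketch of that
layout (constants per the RISC-V privileged spec, where Sv48 is mode 9; the
>> 12 assumes 4 KiB pages):

    #include <stdint.h>

    #define SATP_PPN_MASK  ((1ULL << 44) - 1)
    #define SATP_MODE_SV48 (9ULL << 60)   /* Sv48 translation mode */

    /* Compose satp from the root table's physical address. */
    static uint64_t make_satp(uint64_t root_table_pa)
    {
        return SATP_MODE_SV48 | ((root_table_pa >> 12) & SATP_PPN_MASK);
    }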
In vm_arch_vcpu_add() -- create a vCPU, first sizing and allocating its
guest stack:

    struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
    {
        ...
        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
        stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
                                       ...);
        ...
        vcpu = __vm_vcpu_add(vm, vcpu_id);
        ...
    }
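So the guest stack is DEFAULT_STACK_PGS pages when the VM uses 4 KiB pages
and a single page otherwise; if DEFAULT_STACK_PGS is 5, as in the selftests
headers, that is 5 * 4096 = 20480 bytes for the common 4 KiB case.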
In vm_init_vector_tables() -- allocate the guest-side handler table and
point the guest's exception_handlers variable at it:

    void vm_init_vector_tables(struct kvm_vm *vm)
    {
        vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
                                        vm->page_size, MEM_REGION_DATA);

        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
    }
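The second statement is worth unpacking: the host cannot store to a guest
global directly, so it translates the variable's guest virtual address to a
host virtual address with addr_gva2hva() and writes the table's GVA through
the resulting pointer. The same pattern works for any guest global, e.g.
(a sketch; guest_flag is a hypothetical guest-side variable):

    extern uint64_t guest_flag;   /* hypothetical global linked into the guest */

    static void set_guest_flag(struct kvm_vm *vm, uint64_t val)
    {
        /* Translate the symbol's GVA to a host pointer, then store. */
        *(uint64_t *)addr_gva2hva(vm, (vm_vaddr_t)&guest_flag) = val;
    }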
In vm_install_exception_handler() -- record a handler for one exception
vector in that table:

    void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
    {
        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
        ...
    }
In vm_install_interrupt_handler() -- record the handler used for interrupts:

    void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
    {
        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
        ...
    }
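Putting the vector-table helpers together in a test might look like the
following sketch (the handler body, the ex_regs argument type, and the
choice of vector are assumptions; exception code 2 is the RISC-V
illegal-instruction cause):

    /* Hypothetical guest-side handler for illegal-instruction traps. */
    static void illegal_insn_handler(struct ex_regs *regs)
    {
        /* e.g. record the trap or skip the faulting instruction */
    }

    static void setup_handlers(struct kvm_vm *vm)
    {
        vm_init_vector_tables(vm);
        vm_install_exception_handler(vm, 2, illegal_insn_handler);
    }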