Lines Matching +full:8 +full:- +full:level

// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V code
 */
In page_align():
	return (v + vm->page_size) & ~(vm->page_size - 1);
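As a standalone illustration, a minimal sketch of the same round-up (PAGE_SIZE here is an illustrative constant standing in for vm->page_size; note that this variant advances an already-aligned value by a full page, which merely overallocates in the one place it is used):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* illustrative; the real code uses vm->page_size */

static uint64_t page_align(uint64_t v)
{
	/* Same arithmetic as above: add a page, then clear the low bits. */
	return (v + PAGE_SIZE) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)page_align(1));	/* 4096 */
	printf("%llu\n", (unsigned long long)page_align(4096));	/* 8192 */
	return 0;
}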
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	TEST_ASSERT(level > -1,
		    "Negative page table level (%d) not possible", level);
	TEST_ASSERT(level < vm->pgtable_levels,
		    "Invalid page table level (%d)", level);

	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}
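pte_index_mask[] and pte_index_shift[] are not shown in this listing, but for Sv39/Sv48-style radix tables they encode 9 bits of virtual address per level above the 12-bit page offset. A self-contained sketch under that assumption (the constants here are illustrative, not the PGTBL_* macros from the real file):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define BITS_PER_LEVEL	9	/* 512 entries per 4KB table */

/* Index into the page table at 'level' for guest virtual address 'gva'. */
static uint64_t pte_index(uint64_t gva, int level)
{
	unsigned int shift = PAGE_SHIFT + BITS_PER_LEVEL * level;

	return (gva >> shift) & ((1ULL << BITS_PER_LEVEL) - 1);
}

int main(void)
{
	uint64_t gva = 0x40201000ULL;
	int level;

	/* Walk Sv39 top-down: levels 2, 1, 0. */
	for (level = 2; level >= 0; level--)
		printf("level %d index: %llu\n", level,
		       (unsigned long long)pte_index(gva, level));
	return 0;
}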
In virt_arch_pgd_alloc():
	size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
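With 4KB pages, one table holds 512 8-byte PTEs, and the page_align() defined above rounds the 4096-byte table size up to two pages. A sketch of that arithmetic with illustrative constants:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* illustrative */

int main(void)
{
	size_t ptrs_per_pte = PAGE_SIZE / 8;		/* 512 8-byte PTEs */
	size_t bytes = ptrs_per_pte * 8;		/* 4096 bytes */
	/* page_align() as defined above: add a page, clear the low bits. */
	size_t aligned = (bytes + PAGE_SIZE) & ~(PAGE_SIZE - 1);

	printf("nr_pages = %zu\n", aligned / PAGE_SIZE);	/* 2 */
	return 0;
}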
In virt_arch_pg_map():
	int level = vm->pgtable_levels - 1;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		    "Virtual address not on page boundary,\n"
		    " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		    (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
	...
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, vaddr, level) * 8;
		if (!*ptep && level > 0) {
			...
		}
		level--;
	}
	...
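The elided stores build the non-leaf and leaf PTEs. A sketch of the leaf encoding as the RISC-V privileged spec defines it (PPN at bit 10, V/R/W/X in the low bits; the file's PGTBL_PTE_* masks cover the same fields plus access/dirty bits, which are not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* RISC-V Sv39/Sv48 PTE bit layout, per the privileged spec. */
#define PTE_V		(1ULL << 0)	/* valid */
#define PTE_R		(1ULL << 1)	/* readable */
#define PTE_W		(1ULL << 2)	/* writable */
#define PTE_X		(1ULL << 3)	/* executable */
#define PTE_PPN_SHIFT	10
#define PAGE_SHIFT	12

/* Build a leaf PTE mapping the 4KB page that contains 'paddr'. */
static uint64_t make_leaf_pte(uint64_t paddr, uint64_t perms)
{
	return ((paddr >> PAGE_SHIFT) << PTE_PPN_SHIFT) | perms | PTE_V;
}

int main(void)
{
	uint64_t pte = make_leaf_pte(0x80201000ULL, PTE_R | PTE_W | PTE_X);

	printf("pte = 0x%llx\n", (unsigned long long)pte);
	return 0;
}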
In addr_arch_gva2gpa():
	int level = vm->pgtable_levels - 1;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
	...
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		...
		level--;
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
		  gva, level);
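The same top-down walk can be exercised against mock in-memory tables. A self-contained sketch assuming Sv39 geometry (three levels, 512 entries each); unlike the code above, it indexes uint64_t pointers directly instead of scaling byte offsets by 8:

#include <stdint.h>
#include <stdio.h>

#define ENTRIES		512
#define PAGE_SHIFT	12
#define PTE_V		(1ULL << 0)
#define PTE_R		(1ULL << 1)
#define PTE_PPN_SHIFT	10

/* Mock guest-physical memory: table N lives at GPA N << PAGE_SHIFT. */
static uint64_t mem[3][ENTRIES];

static uint64_t *gpa2hva(uint64_t gpa)
{
	return mem[gpa >> PAGE_SHIFT];
}

static uint64_t pte_index(uint64_t gva, int level)
{
	return (gva >> (PAGE_SHIFT + 9 * level)) & (ENTRIES - 1);
}

static uint64_t pte_addr(uint64_t pte)
{
	return (pte >> PTE_PPN_SHIFT) << PAGE_SHIFT;
}

int main(void)
{
	uint64_t gva = 0x40201000ULL;
	uint64_t *ptep;
	int level = 2;

	/* Pre-build a three-level mapping of gva to GPA 0x5000. */
	mem[0][pte_index(gva, 2)] = ((0x1000ULL >> PAGE_SHIFT) << PTE_PPN_SHIFT) | PTE_V;
	mem[1][pte_index(gva, 1)] = ((0x2000ULL >> PAGE_SHIFT) << PTE_PPN_SHIFT) | PTE_V;
	mem[2][pte_index(gva, 0)] = ((0x5000ULL >> PAGE_SHIFT) << PTE_PPN_SHIFT) | PTE_V | PTE_R;

	/* Walk top-down, exactly like addr_arch_gva2gpa() above. */
	ptep = gpa2hva(0) + pte_index(gva, level);
	level--;
	while (level > -1) {
		ptep = gpa2hva(pte_addr(*ptep)) + pte_index(gva, level);
		level--;
	}
	printf("gpa = 0x%llx\n", (unsigned long long)
	       (pte_addr(*ptep) + (gva & ((1ULL << PAGE_SHIFT) - 1))));
	return 0;
}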
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
		     uint64_t page, int level)
{
	...
	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		...
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
			type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}
In virt_arch_dump():
	int level = vm->pgtable_levels - 1;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
		...
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
In riscv_vcpu_mmu_setup():
	struct kvm_vm *vm = vcpu->vm;

	/*
	 * The RISC-V Sv48 MMU mode supports 56-bit physical addresses
	 * for 48-bit virtual addresses with a 4KB last-level page size.
	 */
	switch (vm->mode) {
	...
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
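The satp value pairs the root-table PPN with a translation mode. A sketch of the RV64 layout per the privileged spec (MODE in bits 63:60, where 8 selects Sv39 and 9 selects Sv48; PPN in bits 43:0), which is what SATP_PPN and the elided mode bits express:

#include <stdint.h>
#include <stdio.h>

#define SATP_MODE_SHIFT	60
#define SATP_MODE_SV39	8ULL
#define SATP_MODE_SV48	9ULL
#define SATP_PPN_MASK	((1ULL << 44) - 1)
#define PAGE_SHIFT	12

/* Compose a satp CSR value from a root page-table physical address. */
static uint64_t make_satp(uint64_t root_pa, uint64_t mode)
{
	return ((root_pa >> PAGE_SHIFT) & SATP_PPN_MASK) |
	       (mode << SATP_MODE_SHIFT);
}

int main(void)
{
	printf("satp = 0x%llx\n",
	       (unsigned long long)make_satp(0x80200000ULL, SATP_MODE_SV48));
	return 0;
}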
In vm_arch_vcpu_add():
	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	...
	/*
	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
	 * powered-off by default, so we ensure that all secondary VCPUs
	 * are powered-on using the KVM_SET_MP_STATE ioctl().
	 */
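What the comment describes is ordinary KVM API usage: mark each secondary vCPU runnable with KVM_SET_MP_STATE. A minimal sketch against the raw ioctl interface (vcpu_fd stands for an already-created vCPU file descriptor; the harness wraps this in its own vcpu ioctl helper):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Power on a secondary vCPU that SBI HSM left powered off. */
static int power_on_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_RUNNABLE,
	};

	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state) < 0) {
		perror("KVM_SET_MP_STATE");
		return -1;
	}
	return 0;
}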
In vcpu_args_set():
	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);
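The bounds reflect the RISC-V calling convention: eight argument registers, a0 through a7. A hedged sketch of the varargs pattern such a helper follows, with a hypothetical set_reg() standing in for the harness's register setter:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the harness's register setter. */
static void set_reg(unsigned int gpr_a_index, uint64_t value)
{
	printf("a%u = 0x%llx\n", gpr_a_index, (unsigned long long)value);
}

/* Copy up to eight arguments into the guest's a0-a7 registers. */
static void args_set(unsigned int num, ...)
{
	va_list ap;
	unsigned int i;

	if (num < 1 || num > 8)	/* the real code TEST_ASSERTs this */
		return;

	va_start(ap, num);
	for (i = 0; i < num; i++)
		set_reg(i, va_arg(ap, uint64_t));
	va_end(ap);
}

int main(void)
{
	args_set(2, (uint64_t)42, (uint64_t)0xdeadbeef);
	return 0;
}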
In route_exception():
	ec = regs->cause & ~CAUSE_IRQ_FLAG;
	...
	/* Use the same handler for all the interrupts */
	if (regs->cause & CAUSE_IRQ_FLAG) {
		vector = 1;
		ec = 0;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);
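The dispatch hinges on how RV64 encodes scause: the top bit flags an interrupt and the low bits carry the code, so CAUSE_IRQ_FLAG is the sign bit. A sketch of that decode (bit positions per the privileged spec):

#include <stdint.h>
#include <stdio.h>

#define CAUSE_IRQ_FLAG	(1ULL << 63)	/* RV64: interrupt bit of scause */

static void decode_cause(uint64_t cause)
{
	uint64_t ec = cause & ~CAUSE_IRQ_FLAG;

	if (cause & CAUSE_IRQ_FLAG)
		printf("interrupt, code %llu\n", (unsigned long long)ec);
	else
		printf("exception, code %llu\n", (unsigned long long)ec);
}

int main(void)
{
	decode_cause(2);			/* illegal instruction */
	decode_cause(CAUSE_IRQ_FLAG | 5);	/* supervisor timer interrupt */
	return 0;
}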
In vm_init_vector_tables():
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
In vm_install_exception_handler():
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	handlers->exception_handlers[0][vector] = handler;
In vm_install_interrupt_handler():
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	handlers->exception_handlers[1][0] = handler;
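Typical wiring from a test body, sketched rather than copied from a real caller: the vector number 2 (illegal instruction), the epc adjustment, and the vcpu_init_vector_tables() call are assumptions about how a test would use these helpers:

#include "kvm_util.h"
#include "processor.h"

/* Hypothetical guest-side handler: skip the faulting instruction. */
static void illegal_insn_handler(struct ex_regs *regs)
{
	regs->epc += 4;	/* assumes a 4-byte, non-compressed instruction */
}

/* Hypothetical host-side setup using the helpers above. */
static void setup_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vm_init_vector_tables(vm);
	vcpu_init_vector_tables(vcpu);
	vm_install_exception_handler(vm, 2 /* illegal instruction */,
				     illegal_insn_handler);
}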