Lines matching "num", "-", "vectors" in tools/testing/selftests/kvm/lib/aarch64/processor.c

1 // SPDX-License-Identifier: GPL-2.0
25 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
30 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in pgd_index()
31 uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; in pgd_index()
38 unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; in pud_index()
39 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pud_index()
41 TEST_ASSERT(vm->pgtable_levels == 4, in pud_index()
42 "Mode %d does not have 4 page table levels", vm->mode); in pud_index()
49 unsigned int shift = (vm->page_shift - 3) + vm->page_shift; in pmd_index()
50 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pmd_index()
52 TEST_ASSERT(vm->pgtable_levels >= 3, in pmd_index()
53 "Mode %d does not have >= 3 page table levels", vm->mode); in pmd_index()
60 uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; in pte_index()
61 return (gva >> vm->page_shift) & mask; in pte_index()
66 return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) && in use_lpa2_pte_format()
67 (vm->pa_bits > 48 || vm->va_bits > 48); in use_lpa2_pte_format()
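The predicate above opts into the LPA2 descriptor layout whenever a 4K or 16K granule is paired with more than 48 bits of physical or virtual address. A standalone restatement with sample configurations (SZ_* carry their usual values of 0x1000/0x4000/0x10000; struct cfg is a stand-in for the selftests' struct kvm_vm):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000
#define SZ_16K 0x4000
#define SZ_64K 0x10000

struct cfg { uint32_t page_size; unsigned int pa_bits, va_bits; };

static bool use_lpa2(const struct cfg *c)
{
        /*
         * LPA2 redefines only the 4K/16K descriptor layout; the 64K
         * granule keeps the older scheme that stores PA bits [51:48]
         * in PTE bits [15:12].
         */
        return (c->page_size == SZ_4K || c->page_size == SZ_16K) &&
               (c->pa_bits > 48 || c->va_bits > 48);
}

int main(void)
{
        struct cfg a = { SZ_4K, 52, 48 };   /* -> 1: 4K with 52-bit PA */
        struct cfg b = { SZ_64K, 52, 52 };  /* -> 0: 64K never uses LPA2 */

        printf("%d %d\n", use_lpa2(&a), use_lpa2(&b));
        return 0;
}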
75 pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift); in addr_pte()
79 pte = pa & PTE_ADDR_MASK(vm->page_shift); in addr_pte()
80 if (vm->page_shift == 16) in addr_pte()
93 pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift); in pte_addr()
96 pa = pte & PTE_ADDR_MASK(vm->page_shift); in pte_addr()
97 if (vm->page_shift == 16) in pte_addr()
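For the 64K granule without LPA2, a 52-bit output address does not fit contiguously in the descriptor, so addr_pte()/pte_addr() fold PA bits [51:48] into PTE bits [15:12] and back (hence the page_shift == 16 branches above). A standalone round-trip sketch of that packing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * 64K granule, pre-LPA2 layout: the PTE holds PA[47:16] in place and
 * PA[51:48] in bits [15:12].
 */
static uint64_t pack_64k(uint64_t pa)
{
        uint64_t pte = pa & (((1ULL << 48) - 1) & ~0xffffULL); /* PA[47:16] */

        pte |= ((pa >> 48) & 0xf) << 12;                       /* PA[51:48] */
        return pte;
}

static uint64_t unpack_64k(uint64_t pte)
{
        uint64_t pa = pte & (((1ULL << 48) - 1) & ~0xffffULL);

        pa |= ((pte >> 12) & 0xf) << 48;
        return pa;
}

int main(void)
{
        uint64_t pa = 0x000f123456780000ULL; /* 52-bit, 64K-aligned */

        assert(unpack_64k(pack_64k(pa)) == pa);
        printf("pte = 0x%llx\n", (unsigned long long)pack_64k(pa));
        return 0;
}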
106 unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; in ptrs_per_pgd()
107 return 1 << (vm->va_bits - shift); in ptrs_per_pgd()
112 return 1 << (vm->page_shift - 3); in ptrs_per_pte()
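The index and table-size arithmetic above reduces to familiar numbers in the common configuration. A standalone check for 4K pages (page_shift = 12), 48-bit VAs and 4 levels, where each level resolves page_shift - 3 = 9 bits and every table therefore holds 512 entries:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12, levels = 4, va_bits = 48;
        unsigned int bits_per_level = page_shift - 3;                        /* 9 */
        unsigned int pgd_shift = (levels - 1) * bits_per_level + page_shift; /* 39 */
        uint64_t gva = 0x0000aabbccdd0000ULL;

        printf("ptrs_per_pte = %u\n", 1u << bits_per_level);        /* 512 */
        printf("ptrs_per_pgd = %u\n", 1u << (va_bits - pgd_shift)); /* 512 */
        printf("pgd idx = %llu\n",
               (unsigned long long)((gva >> pgd_shift) &
                                    ((1ULL << (va_bits - pgd_shift)) - 1)));
        printf("pte idx = %llu\n",
               (unsigned long long)((gva >> page_shift) &
                                    ((1ULL << bits_per_level) - 1)));
        return 0;
}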
117 size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size; in virt_arch_pgd_alloc()
119 if (vm->pgd_created) in virt_arch_pgd_alloc()
122 vm->pgd = vm_phy_pages_alloc(vm, nr_pages, in virt_arch_pgd_alloc()
124 vm->memslots[MEM_REGION_PT]); in virt_arch_pgd_alloc()
125 vm->pgd_created = true; in virt_arch_pgd_alloc()
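The root-table allocation sizes itself from ptrs_per_pgd() and rounds up with page_align(). Note that page_align() (line 25 above) advances by a full page before masking, so an exactly page-sized PGD ends up reserving two pages; a standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint64_t page_align(uint64_t v, uint64_t page_size)
{
        /* Mirrors line 25 above: aligned inputs still grow by a page. */
        return (v + page_size) & ~(page_size - 1);
}

int main(void)
{
        uint64_t page_size = 4096, ptrs_per_pgd = 512;
        uint64_t bytes = ptrs_per_pgd * 8; /* 4096 */

        printf("nr_pages = %llu\n",        /* 2 */
               (unsigned long long)(page_align(bytes, page_size) / page_size));
        return 0;
}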
135 TEST_ASSERT((vaddr % vm->page_size) == 0, in _virt_pg_map()
137 " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); in _virt_pg_map()
138 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, in _virt_pg_map()
139 (vaddr >> vm->page_shift)), in _virt_pg_map()
141 TEST_ASSERT((paddr % vm->page_size) == 0, in _virt_pg_map()
143 " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); in _virt_pg_map()
144 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, in _virt_pg_map()
146 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", in _virt_pg_map()
147 paddr, vm->max_gfn, vm->page_size); in _virt_pg_map()
149 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8; in _virt_pg_map()
154 switch (vm->pgtable_levels) { in _virt_pg_map()
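The elided body (line 154 onward) switches on vm->pgtable_levels and descends from the PGD, allocating intermediate tables on demand before writing the leaf PTE. A toy host-side analogue of that descent, using calloc'd tables and host pointers in place of guest-physical descriptors (a sketch of the shape only, not the selftests' code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LEVEL 9  /* 4K granule: page_shift - 3 */
#define PAGE_SHIFT     12

/* Walk va down through `levels` levels, allocating tables as needed. */
static uint64_t *walk_alloc(uint64_t *table, uint64_t va, int levels)
{
        for (int lvl = levels - 1; lvl > 0; lvl--) {
                unsigned int shift = lvl * BITS_PER_LEVEL + PAGE_SHIFT;
                uint64_t idx = (va >> shift) & ((1ULL << BITS_PER_LEVEL) - 1);

                if (!table[idx])
                        table[idx] = (uint64_t)calloc(1ULL << BITS_PER_LEVEL,
                                                      sizeof(uint64_t));
                table = (uint64_t *)table[idx];
        }
        return &table[(va >> PAGE_SHIFT) & ((1ULL << BITS_PER_LEVEL) - 1)];
}

int main(void)
{
        uint64_t *pgd = calloc(1ULL << BITS_PER_LEVEL, sizeof(uint64_t));
        uint64_t *pte = walk_alloc(pgd, 0x0000aabbccdd0000ULL, 4);

        *pte = 0x40000000 | 3; /* leaf descriptor: PA | valid bits */
        printf("pte = 0x%llx\n", (unsigned long long)*pte);
        return 0;
}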
192 if (!vm->pgd_created) in virt_get_pte_hva()
195 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; in virt_get_pte_hva()
199 switch (vm->pgtable_levels) { in virt_get_pte_hva()
230 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); in addr_arch_gva2gpa()
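The last step of addr_arch_gva2gpa() splices the page-frame address from the leaf PTE together with the in-page offset of the GVA. Worked numbers for 4K pages (the addresses here are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t page_size = 4096;
        uint64_t pte_pa = 0x40005000;            /* as from pte_addr(vm, *ptep) */
        uint64_t gva    = 0x0000aabbccdd0123ULL; /* offset bits: 0x123 */

        printf("gpa = 0x%llx\n",                 /* 0x40005123 */
               (unsigned long long)(pte_pa + (gva & (page_size - 1))));
        return 0;
}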
254 int level = 4 - (vm->pgtable_levels - 1); in virt_arch_dump()
257 if (!vm->pgd_created) in virt_arch_dump()
260 for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) { in virt_arch_dump()
271 struct kvm_vcpu_init default_init = { .target = -1, }; in aarch64_vcpu_setup()
272 struct kvm_vm *vm = vcpu->vm; in aarch64_vcpu_setup()
278 if (init->target == -1) { in aarch64_vcpu_setup()
281 init->target = preferred.target; in aarch64_vcpu_setup()
287 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 in aarch64_vcpu_setup()
296 switch (vm->mode) { in aarch64_vcpu_setup()
299 "with ANY-bit physical address ranges"); in aarch64_vcpu_setup()
320 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); in aarch64_vcpu_setup()
323 ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift); in aarch64_vcpu_setup()
326 switch (vm->mode) { in aarch64_vcpu_setup()
331 ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2; in aarch64_vcpu_setup()
350 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); in aarch64_vcpu_setup()
356 tcr_el1 |= TCR_T0SZ(vm->va_bits); in aarch64_vcpu_setup()
364 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); in aarch64_vcpu_setup()
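TCR_EL1.T0SZ is the one field above with a non-obvious encoding: it stores 64 - va_bits, the number of unmapped upper VA bits. A minimal check (the macro below is an assumed stand-in for the selftests' TCR_T0SZ(); the architectural T0SZ field sits in bits [5:0] of TCR_EL1, so no shift is needed):

#include <stdio.h>

/* Assumed stand-in; T0SZ occupies TCR_EL1 bits [5:0]. */
#define TCR_T0SZ(va_bits) ((unsigned long)(64 - (va_bits)))

int main(void)
{
        /* 48-bit VA -> T0SZ = 16, i.e. TTBR0 covers [0, 2^48). */
        printf("T0SZ = %lu\n", TCR_T0SZ(48));
        return 0;
}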
390 stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size : in __aarch64_vcpu_add()
391 vm->page_size; in __aarch64_vcpu_add()
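The stack sizing above gives DEFAULT_STACK_PGS pages on 4K-page VMs and a single granule-sized page otherwise; assuming DEFAULT_STACK_PGS is 5, as I recall the selftests headers defining it, that is 20 KiB versus one 64 KiB page:

#include <stdio.h>

#define DEFAULT_STACK_PGS 5 /* assumed value from the selftests headers */

static unsigned long stack_bytes(unsigned long page_size)
{
        /* Mirrors the expression on lines 390-391 above. */
        return page_size == 4096 ? DEFAULT_STACK_PGS * page_size : page_size;
}

int main(void)
{
        printf("4K: %lu, 64K: %lu\n", stack_bytes(4096), stack_bytes(65536));
        return 0;
}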
417 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) in vcpu_args_set()
422 TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" in vcpu_args_set()
423 " num: %u", num); in vcpu_args_set()
425 va_start(ap, num); in vcpu_args_set()
427 for (i = 0; i < num; i++) { in vcpu_args_set()
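Callers hand guest arguments to vcpu_args_set(), which loads them into the entry registers x0-x7 so the guest entry point receives them as ordinary C parameters (per the AAPCS64 calling convention, hence the limit of 8). A hedged usage sketch against the selftests API; it builds only inside the selftests tree, and exact header names vary across kernel versions:

#include "kvm_util.h"
#include "processor.h"

static void guest_code(uint64_t a, uint64_t b)
{
        GUEST_ASSERT(a + 1 == b); /* arguments arrive in x0/x1 */
        GUEST_DONE();
}

int main(void)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        vcpu_args_set(vcpu, 2, 41ul, 42ul); /* at most 8, per the assert above */
        vcpu_run(vcpu);
        kvm_vm_free(vm);
        return 0;
}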
466 extern char vectors; in vcpu_init_descriptor_tables()
468 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); in vcpu_init_descriptor_tables()
497 if (handlers && handlers->exception_handlers[vector][ec]) in route_exception()
498 return handlers->exception_handlers[vector][ec](regs); in route_exception()
506 vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), in vm_init_descriptor_tables()
507 vm->page_size, MEM_REGION_DATA); in vm_init_descriptor_tables()
509 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; in vm_init_descriptor_tables()
515 struct handlers *handlers = addr_gva2hva(vm, vm->handlers); in vm_install_sync_handler()
520 handlers->exception_handlers[vector][ec] = handler; in vm_install_sync_handler()
526 struct handlers *handlers = addr_gva2hva(vm, vm->handlers); in vm_install_exception_handler()
530 handlers->exception_handlers[vector][0] = handler; in vm_install_exception_handler()
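Putting the pieces together: vm_init_descriptor_tables() allocates the guest-resident handler table and patches the guest's exception_handlers pointer (line 509), vcpu_init_descriptor_tables() points VBAR_EL1 at the vectors symbol, and route_exception() dispatches by vector and, for sync exceptions, by EC. A hedged usage sketch; VECTOR_SYNC_CURRENT and ESR_EC_DABT are the constant names I recall from the selftests headers and may differ by tree:

#include "kvm_util.h"
#include "processor.h"

static void expect_dabt(struct ex_regs *regs)
{
        /* Skip the faulting instruction and resume the guest. */
        regs->pc += 4;
}

/* Called after the vm and vcpu have been created. */
void setup_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vcpu);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_DABT,
                                expect_dabt);
}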
608 : [res0] "=r"(res->a0), [res1] "=r"(res->a1), \
609 [res2] "=r"(res->a2), [res3] "=r"(res->a3) \
645 * is [0, 2^(64 - TCR_EL1.T0SZ)). in vm_vaddr_populate_bitmap()
647 sparsebit_set_num(vm->vpages_valid, 0, in vm_vaddr_populate_bitmap()
648 (1ULL << vm->va_bits) >> vm->page_shift); in vm_vaddr_populate_bitmap()
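With only TTBR0_EL1 in use, the whole [0, 2^va_bits) range is marked valid, one sparsebit per page. Worked numbers for va_bits = 48 with 4K pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int va_bits = 48, page_shift = 12;
        uint64_t nr = (1ULL << va_bits) >> page_shift;

        printf("valid pages: 2^%u = %llu\n", va_bits - page_shift,
               (unsigned long long)nr); /* 2^36 */
        return 0;
}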