Lines Matching "+full:ipa +full:-reg"

1 // SPDX-License-Identifier: GPL-2.0-only
33 hyp_spin_lock(&vm->lock); in guest_lock_component()
40 hyp_spin_unlock(&vm->lock); in guest_unlock_component()
128 /* The host stage 2 is id-mapped, so use parange for T0SZ */ in prepare_host_vtcr()
145 mmu->arch = &host_mmu.arch; in kvm_host_prepare_stage2()
157 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); in kvm_host_prepare_stage2()
158 mmu->pgt = &host_mmu.pgt; in kvm_host_prepare_stage2()
159 atomic64_set(&mmu->vmid.id, 0); in kvm_host_prepare_stage2()
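
The fragments above from prepare_host_vtcr() and kvm_host_prepare_stage2() rely on the host stage 2 being an identity map, so its input (IPA) range only has to cover the physical address range reported by ID_AA64MMFR0_EL1.PARange. A minimal sketch of that T0SZ arithmetic, assuming a hypothetical parange_to_bits() helper (nothing below is quoted from the file):

	/*
	 * Sketch only: map a PARange encoding to an address width, then
	 * derive VTCR_EL2.T0SZ for an identity-mapped stage 2, where the
	 * input region size is 2^(64 - T0SZ).
	 */
	static unsigned int parange_to_bits(unsigned int parange)
	{
		static const unsigned int bits[] = { 32, 36, 40, 42, 44, 48, 52 };

		return parange < sizeof(bits) / sizeof(bits[0]) ? bits[parange] : 32;
	}

	static unsigned int idmap_t0sz(unsigned int parange)
	{
		return 64 - parange_to_bits(parange);
	}
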
172 void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size)); in guest_s2_zalloc_pages_exact()
186 hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE)); in guest_s2_free_pages_exact()
194 addr = hyp_alloc_pages(&current_vm->pool, 0); in guest_s2_zalloc_page()
204 p->refcount = 1; in guest_s2_zalloc_page()
205 p->order = 0; in guest_s2_zalloc_page()
212 hyp_get_page(&current_vm->pool, addr); in guest_s2_get_page()
217 hyp_put_page(&current_vm->pool, addr); in guest_s2_put_page()
234 struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu; in kvm_guest_prepare_stage2()
238 nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT; in kvm_guest_prepare_stage2()
239 ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); in kvm_guest_prepare_stage2()
243 hyp_spin_lock_init(&vm->lock); in kvm_guest_prepare_stage2()
244 vm->mm_ops = (struct kvm_pgtable_mm_ops) { in kvm_guest_prepare_stage2()
258 ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, in kvm_guest_prepare_stage2()
264 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); in kvm_guest_prepare_stage2()
276 kvm_pgtable_stage2_destroy(&vm->pgt); in reclaim_pgtable_pages()
277 vm->kvm.arch.mmu.pgd_phys = 0ULL; in reclaim_pgtable_pages()
281 addr = hyp_alloc_pages(&vm->pool, 0); in reclaim_pgtable_pages()
284 page->refcount = 0; in reclaim_pgtable_pages()
285 page->order = 0; in reclaim_pgtable_pages()
288 addr = hyp_alloc_pages(&vm->pool, 0); in reclaim_pgtable_pages()
297 if (params->hcr_el2 & HCR_VM) in __pkvm_prot_finalize()
298 return -EPERM; in __pkvm_prot_finalize()
300 params->vttbr = kvm_get_vttbr(mmu); in __pkvm_prot_finalize()
301 params->vtcr = mmu->vtcr; in __pkvm_prot_finalize()
302 params->hcr_el2 |= HCR_VM; in __pkvm_prot_finalize()
307 * page-table walks that have started before we trapped to EL2 in __pkvm_prot_finalize()
312 write_sysreg(params->hcr_el2, hcr_el2); in __pkvm_prot_finalize()
332 struct memblock_region *reg; in host_stage2_unmap_dev_all() local
336 /* Unmap all non-memory regions to recycle the pages */ in host_stage2_unmap_dev_all()
337 for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) { in host_stage2_unmap_dev_all()
338 reg = &hyp_memory[i]; in host_stage2_unmap_dev_all()
339 ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); in host_stage2_unmap_dev_all()
343 return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); in host_stage2_unmap_dev_all()
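
The fragments above from host_stage2_unmap_dev_all() walk the sorted hyp_memory[] array and unmap everything not covered by a memory region, i.e. the MMIO/device holes between regions and the tail above the last one, so that their stage-2 page-table pages can be recycled. Read together, the loop plausibly looks like this sketch (the declarations and error-handling glue are assumptions; only the quoted lines come from the file):

	struct kvm_pgtable *pgt = &host_mmu.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		/* Unmap the hole below this region's base */
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	/* Unmap whatever remains above the last memory region */
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
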
354 struct memblock_region *reg; in find_mem_range() local
357 range->start = 0; in find_mem_range()
358 range->end = ULONG_MAX; in find_mem_range()
363 reg = &hyp_memory[cur]; in find_mem_range()
364 end = reg->base + reg->size; in find_mem_range()
365 if (addr < reg->base) { in find_mem_range()
367 range->end = reg->base; in find_mem_range()
370 range->start = end; in find_mem_range()
372 range->start = reg->base; in find_mem_range()
373 range->end = end; in find_mem_range()
374 return reg; in find_mem_range()
390 return range->start <= addr && addr < range->end; in is_in_mem_range()
395 struct memblock_region *reg; in check_range_allowed_memory() local
402 reg = find_mem_range(start, &range); in check_range_allowed_memory()
403 if (!is_in_mem_range(end - 1, &range)) in check_range_allowed_memory()
404 return -EINVAL; in check_range_allowed_memory()
406 if (!reg || reg->flags & MEMBLOCK_NOMAP) in check_range_allowed_memory()
407 return -EPERM; in check_range_allowed_memory()
419 return is_in_mem_range(end - 1, &r); in range_is_memory()
425 return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start, in __host_stage2_idmap()
440 if (__ret == -ENOMEM) { \
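
The lone "if (__ret == -ENOMEM)" line above belongs to a retry wrapper: when a host stage-2 operation cannot allocate page-table pages, device mappings are torn down via host_stage2_unmap_dev_all() and the operation is retried. A hedged sketch of such a wrapper, not necessarily identical to the file's macro:

	#define host_stage2_try(fn, ...)				\
		({							\
			int __ret = fn(__VA_ARGS__);			\
			if (__ret == -ENOMEM) {				\
				/* Recycle device mappings, retry once */ \
				__ret = host_stage2_unmap_dev_all();	\
				if (!__ret)				\
					__ret = fn(__VA_ARGS__);	\
			}						\
			__ret;						\
		})
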
451 return parent->start <= child->start && child->end <= parent->end; in range_included()
467 return -EAGAIN; in host_stage2_adjust_range()
470 WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE); in host_stage2_adjust_range()
471 return -EPERM; in host_stage2_adjust_range()
499 hyp_phys_to_page(addr)->host_state = state; in __host_update_page_state()
507 return -EPERM; in host_stage2_set_owner_locked()
526 * Block mappings must be used with care in the host stage-2 as a in host_stage2_force_pte_cb()
530 * That assumption is correct for the host stage-2 with RWX mappings in host_stage2_force_pte_cb()
534 * the host stage-2 page-table is in fact the only place where this in host_stage2_force_pte_cb()
535 * state is stored. In all those cases, it is safer to use page-level in host_stage2_force_pte_cb()
536 * mappings, hence avoiding to lose the state because of side-effects in in host_stage2_force_pte_cb()
559 ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot); in host_stage2_idmap()
575 * We've presumably raced with a page-table change which caused in handle_host_mem_abort()
583 * Yikes, we couldn't resolve the fault IPA. This should reinject an in handle_host_mem_abort()
590 BUG_ON(ret && ret != -EAGAIN); in handle_host_mem_abort()
601 struct check_walk_data *d = ctx->arg; in __check_page_state_visitor()
603 return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM; in __check_page_state_visitor()
630 if (hyp_phys_to_page(addr)->host_state != state) in __host_check_page_state_range()
631 return -EPERM; in __host_check_page_state_range()
640 if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) { in __host_set_page_state_range()
689 hyp_assert_lock_held(&vm->lock); in __guest_check_page_state_range()
690 return check_page_state_range(&vm->pgt, addr, size, &d); in __guest_check_page_state_range()
741 ret = -EBUSY; in __pkvm_host_unshare_hyp()
819 u64 size = end - start; in hyp_pin_shared_mem()
895 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_share_guest() local
900 return -EINVAL; in __pkvm_host_share_guest()
909 ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE); in __pkvm_host_share_guest()
914 switch (page->host_state) { in __pkvm_host_share_guest()
919 if (page->host_share_guest_count) in __pkvm_host_share_guest()
921 /* Only host to np-guest multi-sharing is tolerated */ in __pkvm_host_share_guest()
925 ret = -EPERM; in __pkvm_host_share_guest()
929 WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys, in __pkvm_host_share_guest()
931 &vcpu->vcpu.arch.pkvm_memcache, 0)); in __pkvm_host_share_guest()
932 page->host_share_guest_count++; in __pkvm_host_share_guest()
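
Throughout __pkvm_host_share_guest() and the helpers below, expressions such as ipa = hyp_pfn_to_phys(gfn) are plain frame-number to byte-address conversions. As a reminder of what such helpers amount to (their real definitions live in the nVHE headers; this is just a sketch):

	/* Sketch: frame number <-> byte address, one page shift either way */
	static inline phys_addr_t hyp_pfn_to_phys(u64 pfn)
	{
		return (phys_addr_t)pfn << PAGE_SHIFT;
	}

	static inline u64 hyp_phys_to_pfn(phys_addr_t phys)
	{
		return phys >> PAGE_SHIFT;
	}
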
941 static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa) in __check_host_shared_guest() argument
950 ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level); in __check_host_shared_guest()
954 return -ENOENT; in __check_host_shared_guest()
956 return -E2BIG; in __check_host_shared_guest()
958 state = guest_get_page_state(pte, ipa); in __check_host_shared_guest()
960 return -EPERM; in __check_host_shared_guest()
968 if (page->host_state != PKVM_PAGE_SHARED_OWNED) in __check_host_shared_guest()
969 return -EPERM; in __check_host_shared_guest()
970 if (WARN_ON(!page->host_share_guest_count)) in __check_host_shared_guest()
971 return -EINVAL; in __check_host_shared_guest()
980 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_unshare_guest() local
988 ret = __check_host_shared_guest(vm, &phys, ipa); in __pkvm_host_unshare_guest()
992 ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE); in __pkvm_host_unshare_guest()
997 page->host_share_guest_count--; in __pkvm_host_unshare_guest()
998 if (!page->host_share_guest_count) in __pkvm_host_unshare_guest()
1008 static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa) in assert_host_shared_guest() argument
1019 ret = __check_host_shared_guest(vm, &phys, ipa); in assert_host_shared_guest()
1024 WARN_ON(ret && ret != -ENOENT); in assert_host_shared_guest()
1030 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_relax_perms_guest() local
1034 return -EPERM; in __pkvm_host_relax_perms_guest()
1037 return -EINVAL; in __pkvm_host_relax_perms_guest()
1039 assert_host_shared_guest(vm, ipa); in __pkvm_host_relax_perms_guest()
1041 ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0); in __pkvm_host_relax_perms_guest()
1049 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_wrprotect_guest() local
1053 return -EPERM; in __pkvm_host_wrprotect_guest()
1055 assert_host_shared_guest(vm, ipa); in __pkvm_host_wrprotect_guest()
1057 ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE); in __pkvm_host_wrprotect_guest()
1065 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_test_clear_young_guest() local
1069 return -EPERM; in __pkvm_host_test_clear_young_guest()
1071 assert_host_shared_guest(vm, ipa); in __pkvm_host_test_clear_young_guest()
1073 ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold); in __pkvm_host_test_clear_young_guest()
1082 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_mkyoung_guest() local
1085 return -EPERM; in __pkvm_host_mkyoung_guest()
1087 assert_host_shared_guest(vm, ipa); in __pkvm_host_mkyoung_guest()
1089 kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0); in __pkvm_host_mkyoung_guest()
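
The last few helpers (__pkvm_host_relax_perms_guest, __pkvm_host_wrprotect_guest, __pkvm_host_test_clear_young_guest and __pkvm_host_mkyoung_guest) all share the shape that the scattered lines above only hint at: convert the gfn to an IPA, check that the page is still shared by the host, then perform one stage-2 operation under the guest lock. A condensed sketch of that shape, with do_stage2_op() as a placeholder for the specific kvm_pgtable_stage2_*() call:

	/*
	 * Sketch only. The real helpers also refuse protected VMs (and bad
	 * permission arguments) with -EPERM/-EINVAL before getting here.
	 */
	static int guest_gfn_op(struct pkvm_hyp_vm *vm, u64 gfn)
	{
		u64 ipa = hyp_pfn_to_phys(gfn);
		int ret;

		/* The host must still be sharing this page with the guest */
		assert_host_shared_guest(vm, ipa);

		guest_lock_component(vm);
		ret = do_stage2_op(&vm->pgt, ipa, PAGE_SIZE);
		guest_unlock_component(vm);

		return ret;
	}
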