Lines Matching +full:fiq +full:- +full:index

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 #include <linux/entry-kvm.h>
78 if (cap->flags) in kvm_vm_ioctl_enable_cap()
79 return -EINVAL; in kvm_vm_ioctl_enable_cap()
81 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
85 &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
88 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
89 if (!system_supports_mte() || kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
90 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
93 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
95 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
99 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
102 new_cap = cap->args[0]; in kvm_vm_ioctl_enable_cap()
104 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
110 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
112 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
115 kvm->arch.mmu.split_page_chunk_size = new_cap; in kvm_vm_ioctl_enable_cap()
117 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
120 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
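
The fragments above appear to come from kvm_vm_ioctl_enable_cap() in arch/arm64/kvm/arm.c: flags must be zero, and a cap such as MTE can only be enabled before the first vCPU exists. A minimal, hypothetical userspace sketch of the other side of that interface (vm_fd is assumed to be an open VM file descriptor; error handling reduced to the return value):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_mte(int vm_fd)
	{
		/* flags and args must stay zero, and no vCPU may exist yet (see the checks above) */
		struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
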
133 * kvm_arch_init_vm - initializes a VM data structure
140 mutex_init(&kvm->arch.config_lock); in kvm_arch_init_vm()
143 /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ in kvm_arch_init_vm()
144 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
145 mutex_lock(&kvm->arch.config_lock); in kvm_arch_init_vm()
146 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_init_vm()
147 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
158 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) { in kvm_arch_init_vm()
159 ret = -ENOMEM; in kvm_arch_init_vm()
162 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); in kvm_arch_init_vm()
164 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); in kvm_arch_init_vm()
173 kvm->max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
177 bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); in kvm_arch_init_vm()
182 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_init_vm()
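
The lock/unlock pair at lines 144-147 exists only to teach lockdep the intended ordering of kvm->lock and the arch config_lock. A kernel-style sketch of the same priming idiom, with made-up lock names standing in for the real ones:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(outer_lock);	/* analogous to kvm->lock */
	static DEFINE_MUTEX(inner_lock);	/* analogous to kvm->arch.config_lock */

	static void prime_lock_ordering(void)
	{
		/*
		 * Take the locks once in the documented order so lockdep records
		 * "inner nests inside outer"; any later inversion is then reported
		 * even if it never actually deadlocks during testing.
		 */
		mutex_lock(&outer_lock);
		mutex_lock(&inner_lock);
		mutex_unlock(&inner_lock);
		mutex_unlock(&outer_lock);
	}
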
195 * kvm_arch_destroy_vm - destroy the VM data structure
200 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
201 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_destroy_vm()
208 kfree(kvm->arch.mpidr_data); in kvm_arch_destroy_vm()
263 r = kvm->max_vcpus; in kvm_vm_ioctl_check_extension()
269 r = -EINVAL; in kvm_vm_ioctl_check_extension()
271 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
313 r = kvm->arch.mmu.split_page_chunk_size; in kvm_vm_ioctl_check_extension()
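
kvm_vm_ioctl_check_extension() answers userspace KVM_CHECK_EXTENSION queries, and for some arm64 capabilities the answer is a value rather than a boolean: line 263 appears to be the per-VM vCPU limit and line 271 the KVM_CAP_MSI_DEVID case. A hypothetical userspace probe, assuming vm_fd is an open VM descriptor:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int query_limits(int vm_fd)
	{
		/* value-returning capability: the per-VM vCPU limit (line 263 above) */
		int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);

		/* boolean capability: whether MSIs need a device ID (line 271 above) */
		int msi_devid = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MSI_DEVID);

		return (max_vcpus > 0 && msi_devid >= 0) ? max_vcpus : -1;
	}
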
333 return -EINVAL; in kvm_arch_dev_ioctl()
349 return -EBUSY; in kvm_arch_vcpu_precreate()
351 if (id >= kvm->max_vcpus) in kvm_arch_vcpu_precreate()
352 return -EINVAL; in kvm_arch_vcpu_precreate()
361 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
364 /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ in kvm_arch_vcpu_create()
365 mutex_lock(&vcpu->mutex); in kvm_arch_vcpu_create()
366 mutex_lock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
367 mutex_unlock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
368 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_create()
374 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
380 vcpu->arch.fp_state = FP_STATE_FREE; in kvm_arch_vcpu_create()
389 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
391 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
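
kvm_arch_vcpu_precreate() bounds the vCPU id against kvm->max_vcpus and refuses new vCPUs once the VM has run; kvm_arch_vcpu_create() then wires up per-vCPU state such as the stage-2 MMU pointer. A hypothetical userspace counterpart that creates a vCPU by index and maps its shared kvm_run area:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	static struct kvm_run *create_vcpu(int kvm_fd, int vm_fd, int index, int *vcpu_fd)
	{
		long run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

		/* index must stay below the limit enforced by the precreate hook */
		*vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, index);
		if (run_size <= 0 || *vcpu_fd < 0)
			return NULL;

		/* kernel and userspace communicate exits through this shared area */
		struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, *vcpu_fd, 0);
		return run == MAP_FAILED ? NULL : run;
	}
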
406 if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_destroy()
409 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
431 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
432 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
435 * We guarantee that both TLBs and I-cache are private to each in kvm_arch_vcpu_load()
441 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
443 if (*last_ran != vcpu->vcpu_idx) { in kvm_arch_vcpu_load()
445 *last_ran = vcpu->vcpu_idx; in kvm_arch_vcpu_load()
448 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
456 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
468 if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) in kvm_arch_vcpu_load()
484 vcpu->cpu = -1; in kvm_arch_vcpu_put()
489 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_arm_vcpu_power_off()
496 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
498 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
503 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_arm_vcpu_stopped()
508 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); in kvm_arm_vcpu_suspend()
515 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; in kvm_arm_vcpu_suspended()
521 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
531 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
533 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
535 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
544 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
547 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
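
The mp_state accessors above back the KVM_GET_MP_STATE / KVM_SET_MP_STATE ioctls; on arm64, KVM_MP_STATE_STOPPED is the "powered off" state and KVM_MP_STATE_SUSPENDED the suspended one. A hypothetical userspace sketch, assuming vcpu_fd is an open vCPU descriptor:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int power_off_vcpu(int vcpu_fd)
	{
		struct kvm_mp_state mp;

		if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
			return -1;

		mp.mp_state = KVM_MP_STATE_STOPPED;	/* handled by kvm_arm_vcpu_power_off() above */
		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}
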
553 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
563 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); in kvm_arch_vcpu_runnable()
590 mutex_lock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
592 if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1) in kvm_init_mpidr_data()
620 data->mpidr_mask = mask; in kvm_init_mpidr_data()
624 u16 index = kvm_mpidr_index(data, aff); in kvm_init_mpidr_data() local
626 data->cmpidr_to_idx[index] = c; in kvm_init_mpidr_data()
629 kvm->arch.mpidr_data = data; in kvm_init_mpidr_data()
631 mutex_unlock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
641 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_run_pid_change()
645 return -ENOEXEC; in kvm_arch_vcpu_run_pid_change()
648 return -EPERM; in kvm_arch_vcpu_run_pid_change()
672 ret = kvm_init_nv_sysregs(vcpu->kvm); in kvm_arch_vcpu_run_pid_change()
707 mutex_lock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
708 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); in kvm_arch_vcpu_run_pid_change()
709 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
725 vcpu->arch.pause = true; in kvm_arm_halt_guest()
735 vcpu->arch.pause = false; in kvm_arm_resume_guest()
745 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_vcpu_sleep()
748 if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_vcpu_sleep()
762 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
774 * kvm_arch_vcpu_runnable has up-to-date data to decide whether in kvm_vcpu_wfi()
815 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); in kvm_vcpu_suspend()
816 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; in kvm_vcpu_suspend()
817 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in kvm_vcpu_suspend()
830 * check_vcpu_requests - check and handle pending vCPU requests
892 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
903 * for pending work and re-enter), return true without writing to ret.
907 struct kvm_run *run = vcpu->run; in kvm_vcpu_exit_request()
919 *ret = -EINTR; in kvm_vcpu_exit_request()
920 run->exit_reason = KVM_EXIT_INTR; in kvm_vcpu_exit_request()
926 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvm_vcpu_exit_request()
927 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; in kvm_vcpu_exit_request()
928 run->fail_entry.cpu = smp_processor_id(); in kvm_vcpu_exit_request()
956 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
967 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
970 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
978 if (run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
979 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
986 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
987 run->flags = 0; in kvm_arch_vcpu_ioctl_run()
1002 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1009 * preserved on VMID roll-over if the task was preempted, in kvm_arch_vcpu_ioctl_run()
1011 * kvm_arm_vmid_update() in non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1013 if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && in kvm_arch_vcpu_ioctl_run()
1015 __load_stage2(vcpu->arch.hw_mmu, in kvm_arch_vcpu_ioctl_run()
1016 vcpu->arch.hw_mmu->arch); in kvm_arch_vcpu_ioctl_run()
1030 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
1032 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
1035 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1057 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1058 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
1126 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
1138 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
1139 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
1152 * being preempt-safe on VHE. in kvm_arch_vcpu_ioctl_run()
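
kvm_arch_vcpu_ioctl_run() is the kernel half of the KVM_RUN loop: it first completes a pending MMIO exit, honours immediate_exit with -EINTR, and reports entry failures such as running on an unsupported physical CPU through KVM_EXIT_FAIL_ENTRY. A hypothetical userspace loop consuming those exits (run is the mmap'd kvm_run area from the vCPU creation sketch above):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
				return -1;	/* includes EINTR for signals and immediate_exit */

			switch (run->exit_reason) {
			case KVM_EXIT_MMIO:
				/* emulate the access; KVM completes it on the next KVM_RUN */
				break;
			case KVM_EXIT_SYSTEM_EVENT:
				return 0;	/* shutdown, reset, wakeup, suspend, ... */
			case KVM_EXIT_FAIL_ENTRY:
				return -1;	/* e.g. KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED */
			default:
				break;		/* other exits elided in this sketch */
			}
		}
	}
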
1187 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
1188 * virtual IRQ/FIQ fields in the HCR appropriately. in vcpu_interrupt_line()
1199 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
1202 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
1209 trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
1214 return -ENXIO; in kvm_vm_ioctl_irq_line()
1218 return -EINVAL; in kvm_vm_ioctl_irq_line()
1221 return -EINVAL; in kvm_vm_ioctl_irq_line()
1226 return -ENXIO; in kvm_vm_ioctl_irq_line()
1230 return -EINVAL; in kvm_vm_ioctl_irq_line()
1233 return -EINVAL; in kvm_vm_ioctl_irq_line()
1238 return -ENXIO; in kvm_vm_ioctl_irq_line()
1241 return -EINVAL; in kvm_vm_ioctl_irq_line()
1246 return -EINVAL; in kvm_vm_ioctl_irq_line()
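
kvm_vm_ioctl_irq_line() unpacks the 32-bit irq field of KVM_IRQ_LINE into an interrupt type, a vCPU index and an interrupt number; the per-vCPU IRQ and FIQ lines can only be driven this way when no in-kernel interrupt controller is present. A hypothetical userspace sketch that asserts vCPU 0's virtual FIQ line, using the UAPI encoding macros:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int assert_vfiq(int vm_fd)
	{
		struct kvm_irq_level irq = {
			.irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
			       (0 << KVM_ARM_IRQ_VCPU_SHIFT) |	/* vCPU index 0 */
			       KVM_ARM_IRQ_CPU_FIQ,
			.level = 1,				/* assert the line; 0 deasserts */
		};

		return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
	}
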
1276 unsigned long features = init->features[0]; in kvm_vcpu_init_check_features()
1280 return -ENOENT; in kvm_vcpu_init_check_features()
1282 for (i = 1; i < ARRAY_SIZE(init->features); i++) { in kvm_vcpu_init_check_features()
1283 if (init->features[i]) in kvm_vcpu_init_check_features()
1284 return -ENOENT; in kvm_vcpu_init_check_features()
1288 return -EINVAL; in kvm_vcpu_init_check_features()
1296 return -EINVAL; in kvm_vcpu_init_check_features()
1301 return -EINVAL; in kvm_vcpu_init_check_features()
1307 if (kvm_has_mte(vcpu->kvm)) in kvm_vcpu_init_check_features()
1308 return -EINVAL; in kvm_vcpu_init_check_features()
1312 return -EINVAL; in kvm_vcpu_init_check_features()
1320 unsigned long features = init->features[0]; in kvm_vcpu_init_changed()
1322 return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, in kvm_vcpu_init_changed()
1328 struct kvm *kvm = vcpu->kvm; in kvm_setup_vcpu()
1335 if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) in kvm_setup_vcpu()
1344 unsigned long features = init->features[0]; in __kvm_vcpu_set_target()
1345 struct kvm *kvm = vcpu->kvm; in __kvm_vcpu_set_target()
1346 int ret = -EINVAL; in __kvm_vcpu_set_target()
1348 mutex_lock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1350 if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && in __kvm_vcpu_set_target()
1354 bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); in __kvm_vcpu_set_target()
1363 set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); in __kvm_vcpu_set_target()
1367 mutex_unlock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1376 if (init->target != KVM_ARM_TARGET_GENERIC_V8 && in kvm_vcpu_set_target()
1377 init->target != kvm_target_cpu()) in kvm_vcpu_set_target()
1378 return -EINVAL; in kvm_vcpu_set_target()
1388 return -EINVAL; in kvm_vcpu_set_target()
1401 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid in kvm_arch_vcpu_ioctl_vcpu_init()
1405 if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { in kvm_arch_vcpu_ioctl_vcpu_init()
1406 init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); in kvm_arch_vcpu_ioctl_vcpu_init()
1420 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1425 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1431 vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); in kvm_arch_vcpu_ioctl_vcpu_init()
1434 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1436 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
1441 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in kvm_arch_vcpu_ioctl_vcpu_init()
1443 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
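
The fragments from line 1376 onwards implement KVM_ARM_VCPU_INIT: the target is accepted only if it is KVM_ARM_TARGET_GENERIC_V8 or the host's own target, feature bits are validated and recorded once per VM, and KVM_ARM_VCPU_POWER_OFF is treated as an ephemeral "start powered off" request. A hypothetical userspace sketch of the usual call sequence:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int init_vcpu_powered_off(int vm_fd, int vcpu_fd)
	{
		struct kvm_vcpu_init init;

		/* let the kernel pick a target/feature baseline valid for this host */
		if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
			return -1;

		/* ephemeral feature: the vCPU starts in the STOPPED mp_state */
		init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;

		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}
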
1451 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1453 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1465 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1467 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1479 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1481 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1504 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1505 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1506 return -EINVAL; in kvm_arm_vcpu_set_events()
1509 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1510 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1511 return -EINVAL; in kvm_arm_vcpu_set_events()
1519 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1528 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1539 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1543 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1566 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1570 r = -EPERM; in kvm_arch_vcpu_ioctl()
1574 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1581 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1584 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1588 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1595 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1602 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1612 return -EINVAL; in kvm_arch_vcpu_ioctl()
1615 return -EFAULT; in kvm_arch_vcpu_ioctl()
1623 return -EFAULT; in kvm_arch_vcpu_ioctl()
1631 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1634 return -EFAULT; in kvm_arch_vcpu_ioctl()
1639 r = -EINVAL; in kvm_arch_vcpu_ioctl()
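
The -E2BIG branch near line 1581 is part of the standard two-step KVM_GET_REG_LIST handshake: a first call with too small an n fails but writes back the real count, and a second call with enough room copies the register indices. A hypothetical userspace sketch:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_reg_list *get_reg_list(int vcpu_fd)
	{
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;

		/* expected to fail with E2BIG while reporting the register count */
		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

		list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
		if (!list)
			return NULL;

		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
			free(list);
			return NULL;
		}
		return list;	/* list->reg[] now holds one KVM_GET/SET_ONE_REG id per register */
	}
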
1653 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { in kvm_vm_ioctl_set_device_addr()
1656 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1659 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
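
kvm_vm_ioctl_set_device_addr() services the legacy KVM_ARM_SET_DEVICE_ADDR ioctl (largely superseded by KVM_CREATE_DEVICE attributes, but still the simplest way to place a GICv2). A hypothetical userspace sketch placing the VGIC v2 distributor at a guest physical address chosen by the VMM:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_vgic_v2_dist(int vm_fd, __u64 guest_pa)
	{
		struct kvm_arm_device_addr dev = {
			.id   = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
				KVM_VGIC_V2_ADDR_TYPE_DIST,
			.addr = guest_pa,	/* guest physical base of the distributor */
		};

		return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev);
	}
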
1665 switch (attr->group) { in kvm_vm_has_attr()
1669 return -ENXIO; in kvm_vm_has_attr()
1675 switch (attr->group) { in kvm_vm_set_attr()
1679 return -ENXIO; in kvm_vm_set_attr()
1685 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1693 return -ENXIO; in kvm_arch_vm_ioctl()
1694 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1696 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1703 return -EFAULT; in kvm_arch_vm_ioctl()
1712 return -EFAULT; in kvm_arch_vm_ioctl()
1720 return -EFAULT; in kvm_arch_vm_ioctl()
1727 return -EFAULT; in kvm_arch_vm_ioctl()
1732 return -EFAULT; in kvm_arch_vm_ioctl()
1738 return -EFAULT; in kvm_arch_vm_ioctl()
1746 return -EFAULT; in kvm_arch_vm_ioctl()
1750 return -EINVAL; in kvm_arch_vm_ioctl()
1759 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { in unlock_vcpus()
1761 mutex_unlock(&tmp_vcpu->mutex); in unlock_vcpus()
1767 lockdep_assert_held(&kvm->lock); in unlock_all_vcpus()
1769 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); in unlock_all_vcpus()
1778 lockdep_assert_held(&kvm->lock); in lock_all_vcpus()
1782 * core KVM code tries to grab the vcpu->mutex. in lock_all_vcpus()
1784 * By grabbing the vcpu->mutex of all VCPUs we ensure that no in lock_all_vcpus()
1788 if (!mutex_trylock(&tmp_vcpu->mutex)) { in lock_all_vcpus()
1789 unlock_vcpus(kvm, c - 1); in lock_all_vcpus()
1799 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
1849 * Calculate the raw per-cpu offset without a translation from the in cpu_prepare_hyp_mode()
1851 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_prepare_hyp_mode()
1854 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - in cpu_prepare_hyp_mode()
1857 params->mair_el2 = read_sysreg(mair_el1); in cpu_prepare_hyp_mode()
1872 params->tcr_el2 = tcr; in cpu_prepare_hyp_mode()
1874 params->pgd_pa = kvm_mmu_get_httbr(); in cpu_prepare_hyp_mode()
1876 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; in cpu_prepare_hyp_mode()
1878 params->hcr_el2 = HCR_HOST_NVHE_FLAGS; in cpu_prepare_hyp_mode()
1880 params->hcr_el2 |= HCR_E2H; in cpu_prepare_hyp_mode()
1881 params->vttbr = params->vtcr = 0; in cpu_prepare_hyp_mode()
1915 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
1934 * - If the CPU is affected by Spectre-v2, the hardening sequence is
1938 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
1942 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
1947 * VHE, as we don't have hypervisor-specific mappings. If the system
1953 void *vector = hyp_spectre_vector_selector[data->slot]; in cpu_set_hyp_vector()
1958 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); in cpu_set_hyp_vector()
1963 kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); in cpu_hyp_init_context()
2040 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
2041 * re-enable hyp. in hyp_init_cpu_pm_notifier()
2048 * so that the hyp will be re-enabled in hyp_init_cpu_pm_notifier()
2095 * Copy the MPIDR <-> logical CPU ID mapping to hyp. in init_cpu_logical_map()
2141 * Register CPU low-power notifier in init_subsystems()
2153 case -ENODEV: in init_subsystems()
2154 case -ENXIO: in init_subsystems()
2212 * prevent a later re-init attempt in kvm_arch_hardware_enable(). in do_pkvm_init()
2225 * Although this is per-CPU, we make it global for simplicity, e.g., not in get_hyp_id_aa64pfr0_el1()
2228 * Unlike for non-protected VMs, userspace cannot override this for in get_hyp_id_aa64pfr0_el1()
2284 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2285 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2286 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2287 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2288 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2289 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2290 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2291 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2292 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2293 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2297 /* Inits Hyp-mode on all online CPUs */
2302 int err = -ENOMEM; in init_hyp_mode()
2305 * The protected Hyp-mode cannot be initialized if the memory pool in init_hyp_mode()
2319 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
2326 err = -ENOMEM; in init_hyp_mode()
2334 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
2342 err = -ENOMEM; in init_hyp_mode()
2352 * Map the Hyp-code called directly from the host in init_hyp_mode()
2357 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
2401 err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va); in init_hyp_mode()
2413 params->stack_pa = __pa(stack_page); in init_hyp_mode()
2441 err = -ENODEV; in init_hyp_mode()
2467 if (kvm->arch.mpidr_data) { in kvm_mpidr_to_vcpu()
2468 u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr); in kvm_mpidr_to_vcpu()
2471 kvm->arch.mpidr_data->cmpidr_to_idx[idx]); in kvm_mpidr_to_vcpu()
2501 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
2502 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
2510 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
2511 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
2519 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
2527 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
2530 /* Initialize Hyp-mode and memory mappings on all CPUs */
2538 return -ENODEV; in kvm_arm_init()
2543 return -ENODEV; in kvm_arm_init()
2622 return -EINVAL; in early_kvm_mode_cfg()
2630 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); in early_kvm_mode_cfg()
2653 return -EINVAL; in early_kvm_mode_cfg()
2655 early_param("kvm-arm.mode", early_kvm_mode_cfg);
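
The final fragment registers the kvm-arm.mode= early parameter parsed by early_kvm_mode_cfg(). As one example drawn from the documented values, protected KVM (pKVM) can be requested from the kernel command line with:

	kvm-arm.mode=protected
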