Lines Matching +full:reserved-cpu-vectors
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
84 if (cap->flags) in kvm_vm_ioctl_enable_cap()
85 return -EINVAL; in kvm_vm_ioctl_enable_cap()
87 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
90 kvm->arch.return_nisv_io_abort_to_user = true; in kvm_vm_ioctl_enable_cap()
93 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
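For context, the matched fragments above come from the VM capability handler; in this era of arch/arm64/kvm/arm.c (circa v5.10) the whole function plausibly reads as follows, a reassembled sketch rather than a verbatim quote:

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		/* Forward "no ISS" data aborts to userspace for emulation */
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}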
109 * Although this is a per-CPU feature, we make it global because in set_default_csv2()
116 kvm->arch.pfr0_csv2 = 1; in set_default_csv2()
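The CSV2 default above is keyed off the host's Spectre-v2 state; the surrounding helper plausibly reads like this (a sketch assuming the v5.10-era arm64_get_spectre_v2_state() API):

static void set_default_csv2(struct kvm *kvm)
{
	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
		kvm->arch.pfr0_csv2 = 1;
}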
120 * kvm_arch_init_vm - initializes a VM data structure
131 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu); in kvm_arch_init_vm()
142 kvm->arch.max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
148 kvm_free_stage2_pgd(&kvm->arch.mmu); in kvm_arch_init_vm()
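The init and error-path fragments above fit together roughly as below; a sketch of the v5.10-era flow, with the intermediate create_hyp_mappings()/vgic steps recalled from context rather than from the listing:

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
		return ret;

	/* Make the VM structure visible to EL2 */
	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	set_default_csv2(kvm);

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
}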
159 * kvm_arch_destroy_vm - destroy the VM data structure
166 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
171 if (kvm->vcpus[i]) { in kvm_arch_destroy_vm()
172 kvm_vcpu_destroy(kvm->vcpus[i]); in kvm_arch_destroy_vm()
173 kvm->vcpus[i] = NULL; in kvm_arch_destroy_vm()
176 atomic_set(&kvm->online_vcpus, 0); in kvm_arch_destroy_vm()
212 r = kvm->arch.max_vcpus; in kvm_vm_ioctl_check_extension()
218 r = -EINVAL; in kvm_vm_ioctl_check_extension()
220 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
242 return -EINVAL; in kvm_arch_dev_ioctl()
264 return -EBUSY; in kvm_arch_vcpu_precreate()
266 if (id >= kvm->arch.max_vcpus) in kvm_arch_vcpu_precreate()
267 return -EINVAL; in kvm_arch_vcpu_precreate()
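The two checks above make up essentially the whole precreate hook; reassembled as a sketch:

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	/* No new vcpus once the in-kernel irqchip has been initialized */
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}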
277 vcpu->arch.target = -1; in kvm_arch_vcpu_create()
278 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_arch_vcpu_create()
280 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
289 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
291 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
306 if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_destroy()
309 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
325 * WFI), we need to sync back the state of the GIC CPU interface in kvm_arch_vcpu_blocking()
327 * that kvm_arch_vcpu_runnable has up-to-date data to decide in kvm_arch_vcpu_blocking()
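The comment above belongs to the blocking hook, which syncs the GIC CPU interface state back before the vcpu sleeps; a sketch of the v5.10-era function (the vgic helper names are best-effort recollections):

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the GIC CPU interface (latest PMR and group enables)
	 * so kvm_arch_vcpu_runnable() sees fresh data, and tell GICv4
	 * that we need doorbells should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}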
346 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
351 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
352 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
356 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
358 if (*last_ran != vcpu->vcpu_id) { in kvm_arch_vcpu_load()
360 *last_ran = vcpu->vcpu_id; in kvm_arch_vcpu_load()
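The last_ran check above guards a local TLB/I-cache nuke when a different vcpu of the same VM last ran on this physical CPU; the snippet plausibly reads as follows (sketch; the exact hypercall name varies across kernel versions):

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		/* Nuke this physical CPU's stale contexts for the VMID */
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}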
363 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
371 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
392 vcpu->cpu = -1; in kvm_arch_vcpu_put()
397 vcpu->arch.power_off = true; in vcpu_power_off()
405 if (vcpu->arch.power_off) in kvm_arch_vcpu_ioctl_get_mpstate()
406 mp_state->mp_state = KVM_MP_STATE_STOPPED; in kvm_arch_vcpu_ioctl_get_mpstate()
408 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
418 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
420 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_set_mpstate()
426 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
433 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
436 * If the guest CPU is not waiting for interrupts or an interrupt line is
437 * asserted, the CPU is by definition runnable.
443 && !v->arch.power_off && !v->arch.pause); in kvm_arch_vcpu_runnable()
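Put together, the runnable test is a one-liner over the virtual IRQ/FIQ lines, the vgic pending state, and the power/pause flags; a sketch:

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);

	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}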
451 /* Just ensure a guest exit from a particular CPU */
464 * need_new_vmid_gen - check that the VMID is still valid
469 * The hardware supports a limited set of values with the value zero reserved
478 smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ in need_new_vmid_gen()
479 return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen); in need_new_vmid_gen()
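The generation check is small enough to reassemble in full from the fragments above; a sketch:

static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);

	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}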
483 * update_vmid - Update the vmid with a valid VMID for the current generation
484 * @vmid: The stage-2 VMID information struct
494 * We need to re-check the vmid_gen here to ensure that if another vcpu in update_vmid()
509 * On SMP we know no other CPUs can use this CPU's or each in update_vmid()
522 vmid->vmid = kvm_next_vmid; in update_vmid()
524 kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1; in update_vmid()
527 WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen)); in update_vmid()
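Stitching the update_vmid() fragments together, the allocation path under kvm_vmid_lock plausibly looks like this (sketch of the v5.10-era logic):

	spin_lock(&kvm_vmid_lock);

	/*
	 * Re-check the vmid_gen so that if another vcpu already allocated
	 * a valid vmid for this vm, this vcpu uses the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns, since the
		 * kvm_vmid_lock blocks them from re-entry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain so all data structures are clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);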
534 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_first_run_init()
537 if (likely(vcpu->arch.has_run_once)) in kvm_vcpu_first_run_init()
541 return -EPERM; in kvm_vcpu_first_run_init()
543 vcpu->arch.has_run_once = true; in kvm_vcpu_first_run_init()
583 vcpu->arch.pause = true; in kvm_arm_halt_guest()
593 vcpu->arch.pause = false; in kvm_arm_resume_guest()
603 (!vcpu->arch.power_off) && (!vcpu->arch.pause), in vcpu_req_sleep()
606 if (vcpu->arch.power_off || vcpu->arch.pause) { in vcpu_req_sleep()
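The two fragments above sit inside the sleep helper, which parks the vcpu on an rcuwait until it is neither powered off nor paused; a sketch of the v5.10-era function:

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal; request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb()
	 * in kvm_psci_vcpu_on().
	 */
	smp_rmb();
}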
621 return vcpu->arch.target >= 0; in kvm_vcpu_initialized()
653 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
664 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
668 return -ENOEXEC; in kvm_arch_vcpu_ioctl_run()
674 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
680 if (run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
681 return -EINTR; in kvm_arch_vcpu_ioctl_run()
688 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
695 update_vmid(&vcpu->arch.hw_mmu->vmid); in kvm_arch_vcpu_ioctl_run()
702 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
717 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
718 run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
731 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
732 run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
740 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
742 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
744 if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) || in kvm_arch_vcpu_ioctl_run()
746 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
767 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
768 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
830 * if implemented by the CPU. If we spot the guest in such in kvm_arch_vcpu_ioctl_run()
837 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
842 vcpu->arch.target = -1; in kvm_arch_vcpu_ioctl_run()
849 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
850 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
886 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
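The world-switch comment above belongs to the userspace-irqchip line injector, which flips the virtual IRQ/FIQ bits in HCR and kicks the vcpu; a sketch of the surrounding function:

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/* If we didn't change anything, no need to wake up or kick */
	if (set == level)
		return 0;

	/*
	 * The irq line changed: wake up sleeping VCPUs and trigger a
	 * world-switch round on the running physical CPU so the virtual
	 * IRQ/FIQ fields in the HCR take effect.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}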
898 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
900 int nrcpus = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_irq_line()
902 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
909 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
914 return -ENXIO; in kvm_vm_ioctl_irq_line()
917 return -EINVAL; in kvm_vm_ioctl_irq_line()
921 return -EINVAL; in kvm_vm_ioctl_irq_line()
924 return -EINVAL; in kvm_vm_ioctl_irq_line()
929 return -ENXIO; in kvm_vm_ioctl_irq_line()
932 return -EINVAL; in kvm_vm_ioctl_irq_line()
936 return -EINVAL; in kvm_vm_ioctl_irq_line()
939 return -EINVAL; in kvm_vm_ioctl_irq_line()
941 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL); in kvm_vm_ioctl_irq_line()
944 return -ENXIO; in kvm_vm_ioctl_irq_line()
947 return -EINVAL; in kvm_vm_ioctl_irq_line()
952 return -EINVAL; in kvm_vm_ioctl_irq_line()
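The cascade of -ENXIO/-EINVAL returns above validates a packed irq field; the decode at the top of kvm_vm_ioctl_irq_line() plausibly reads (sketch; the VCPU2 bits extend the vcpu index beyond 255):

	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK)
		    * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;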
961 if (init->target != phys_target) in kvm_vcpu_set_target()
962 return -EINVAL; in kvm_vcpu_set_target()
968 if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) in kvm_vcpu_set_target()
969 return -EINVAL; in kvm_vcpu_set_target()
971 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ in kvm_vcpu_set_target()
972 for (i = 0; i < sizeof(init->features) * 8; i++) { in kvm_vcpu_set_target()
973 bool set = (init->features[i / 32] & (1 << (i % 32))); in kvm_vcpu_set_target()
976 return -ENOENT; in kvm_vcpu_set_target()
982 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && in kvm_vcpu_set_target()
983 test_bit(i, vcpu->arch.features) != set) in kvm_vcpu_set_target()
984 return -EINVAL; in kvm_vcpu_set_target()
987 set_bit(i, vcpu->arch.features); in kvm_vcpu_set_target()
990 vcpu->arch.target = phys_target; in kvm_vcpu_set_target()
995 vcpu->arch.target = -1; in kvm_vcpu_set_target()
996 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_vcpu_set_target()
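In reading order, the feature-validation loop that the fragments above come from plausibly reads as follows (sketch reassembling the matched lines):

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}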
1017 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1020 if (vcpu->arch.has_run_once) { in kvm_arch_vcpu_ioctl_vcpu_init()
1022 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
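The FWB comment and the stage2_unmap_vm() call above form the re-init cleanup; a sketch of the branch (assuming the v5.10-era __flush_icache_all() name, which later kernels renamed):

	if (vcpu->arch.has_run_once) {
		/*
		 * With FWB there is no need to unmap stage-2, but we do
		 * need to invalidate the I-cache, as FWB does *not*
		 * imply CTR_EL0.DIC.
		 */
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}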
1030 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1032 if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) in kvm_arch_vcpu_ioctl_vcpu_init()
1035 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_vcpu_init()
1043 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1045 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1057 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1059 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1071 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1073 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1095 /* check whether the reserved field is zero */ in kvm_arm_vcpu_set_events()
1096 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1097 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1098 return -EINVAL; in kvm_arm_vcpu_set_events()
1101 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1102 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1103 return -EINVAL; in kvm_arm_vcpu_set_events()
1111 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1120 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1131 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1135 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1150 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1154 r = -EPERM; in kvm_arch_vcpu_ioctl()
1158 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1165 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1168 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1172 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1179 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1186 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1196 return -EINVAL; in kvm_arch_vcpu_ioctl()
1199 return -EFAULT; in kvm_arch_vcpu_ioctl()
1207 return -EFAULT; in kvm_arch_vcpu_ioctl()
1215 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1218 return -EFAULT; in kvm_arch_vcpu_ioctl()
1223 r = -EINVAL; in kvm_arch_vcpu_ioctl()
1245 dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> in kvm_vm_ioctl_set_device_addr()
1247 type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> in kvm_vm_ioctl_set_device_addr()
1253 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1254 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); in kvm_vm_ioctl_set_device_addr()
1256 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
1263 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1270 return -ENXIO; in kvm_arch_vm_ioctl()
1271 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1273 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1280 return -EFAULT; in kvm_arch_vm_ioctl()
1292 return -EFAULT; in kvm_arch_vm_ioctl()
1297 return -EINVAL; in kvm_arch_vm_ioctl()
1303 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
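The listing only catches the first line of this helper; the complete pair of nVHE per-cpu helpers plausibly reads (sketch):

static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned int nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}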
1320 * !SV2 + !HEL2 -> use direct vectors in kvm_map_vectors()
1321 *  SV2 + !HEL2 -> use hardened vectors in place in kvm_map_vectors()
1322 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping in kvm_map_vectors()
1323 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping in kvm_map_vectors()
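One way the decision table above turns into code in this era (a sketch; the SV2/HEL2 capability names and the __kvm_bp_vect_base/__kvm_harden_el2_vector_slot symbols are best-effort recollections, not verified):

static int kvm_map_vectors(void)
{
	/* SV2 = ARM64_SPECTRE_V2, HEL2 = ARM64_HARDEN_EL2_VECTORS */
	if (cpus_have_const_cap(ARM64_SPECTRE_V2))
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't know
		 * yet which CPUs have a BP hardening slot we can reuse.
		 */
		__kvm_harden_el2_vector_slot =
			atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}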
1360 * Calculate the raw per-cpu offset without a translation from the in cpu_init_hyp_mode()
1362 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_init_hyp_mode()
1364 tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) - in cpu_init_hyp_mode()
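The listing truncates the tpidr_el2 computation at the minus sign; the subtraction plausibly completes as below, yielding the offset between this CPU's copy of the nVHE per-cpu region and the kernel's mapping of the section (sketch):

	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));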
1384 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
1401 kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); in cpu_hyp_reinit()
1452 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
1453 * re-enable hyp. in hyp_init_cpu_pm_notifier()
1460 * so that the hardware will be re-enabled in hyp_init_cpu_pm_notifier()
1515 * Register CPU lower-power notifier in init_subsystems()
1527 case -ENODEV: in init_subsystems()
1528 case -ENXIO: in init_subsystems()
1554 int cpu; in teardown_hyp_mode() local
1557 for_each_possible_cpu(cpu) { in teardown_hyp_mode()
1558 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); in teardown_hyp_mode()
1559 free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order()); in teardown_hyp_mode()
1564 * Inits Hyp-mode on all online CPUs
1568 int cpu; in init_hyp_mode() local
1579 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
1581 for_each_possible_cpu(cpu) { in init_hyp_mode()
1586 err = -ENOMEM; in init_hyp_mode()
1590 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; in init_hyp_mode()
1594 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
1596 for_each_possible_cpu(cpu) { in init_hyp_mode()
1602 err = -ENOMEM; in init_hyp_mode()
1608 kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr; in init_hyp_mode()
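The allocation fragments above come from the per-cpu setup loop in init_hyp_mode(); a sketch of the whole loop, which copies the kernel's nVHE per-cpu template into freshly allocated pages:

	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start),
		       nvhe_percpu_size());
		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
	}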
1612 * Map the Hyp-code called directly from the host in init_hyp_mode()
1617 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
1637 kvm_err("Cannot map vectors\n"); in init_hyp_mode()
1644 for_each_possible_cpu(cpu) { in init_hyp_mode()
1645 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); in init_hyp_mode()
1658 for_each_possible_cpu(cpu) { in init_hyp_mode()
1659 char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; in init_hyp_mode()
1707 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
1708 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
1716 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
1717 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
1725 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
1733 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
1737 * Initialize Hyp-mode and memory mappings on all CPUs.
1742 int ret, cpu; in kvm_arch_init() local
1747 return -ENODEV; in kvm_arch_init()
1753 kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n"); in kvm_arch_init()
1754 return -ENODEV; in kvm_arch_init()
1759 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ in kvm_arch_init()
1762 for_each_online_cpu(cpu) { in kvm_arch_init()
1763 smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); in kvm_arch_init()
1765 kvm_err("Error, CPU %d not supported!\n", cpu); in kvm_arch_init()
1766 return -ENODEV; in kvm_arch_init()