Lines Matching +full:activate +full:- +full:to +full:- +full:activate
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
19 #include <linux/amd-iommu.h>
52 #define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
56 #define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
64 * This hash table is used to map VM_ID to a struct kvm_svm,
65 * when handling AMD IOMMU GALOG notification to schedule in
78 struct list_head node; /* Used by SVM for per-vcpu ir_list */
79 void *data; /* Storing pointer to struct amd_ir_data */
90 * This function is called from the IOMMU driver to notify in avic_ga_log_notifier()
91 * SVM to schedule in a particular vCPU of a particular VM.
106 if (kvm_svm->avic_vm_id != vm_id) in avic_ga_log_notifier()
108 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id); in avic_ga_log_notifier()
115 * bit in the vAPIC backing page. So, we just need to schedule in avic_ga_log_notifier()
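Read together, the avic_ga_log_notifier() hits above trace the whole GALOG path: decode the GA tag into vm_id and vcpu_id, find the matching VM in the vm_id hash table, resolve the vCPU, and wake it. A sketch of how these fragments fit together (a reconstruction; locking and tracing details vary by kernel version):

static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	/* Find the VM whose avic_vm_id matches the tag, then its vCPU. */
	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
		if (kvm_svm->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/*
	 * The IOMMU has already set the pending bit in the vAPIC
	 * backing page, so all that remains is to schedule in
	 * (wake up) the target vCPU.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}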
132 if (kvm_svm->avic_logical_id_table_page) in avic_vm_destroy()
133 __free_page(kvm_svm->avic_logical_id_table_page); in avic_vm_destroy()
134 if (kvm_svm->avic_physical_id_table_page) in avic_vm_destroy()
135 __free_page(kvm_svm->avic_physical_id_table_page); in avic_vm_destroy()
138 hash_del(&kvm_svm->hnode); in avic_vm_destroy()
145 int err = -ENOMEM; in avic_vm_init()
160 kvm_svm->avic_physical_id_table_page = p_page; in avic_vm_init()
167 kvm_svm->avic_logical_id_table_page = l_page; in avic_vm_init()
172 if (vm_id == 0) { /* id is 1-based, zero is not okay */ in avic_vm_init()
179 if (k2->avic_vm_id == vm_id) in avic_vm_init()
183 kvm_svm->avic_vm_id = vm_id; in avic_vm_init()
184 hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id); in avic_vm_init()
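The avic_vm_init() hits show a wrapping 24-bit vm_id allocator: ids are 1-based, and once the counter has wrapped, every candidate is checked against the hash table before being claimed. Roughly (a sketch assuming the next_vm_id / next_vm_id_wrapped statics this allocator implies):

again:
	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
	if (vm_id == 0) { /* id is 1-based, zero is not okay */
		next_vm_id_wrapped = 1;
		goto again;
	}
	/* Still in use? Only possible if the counter wrapped at least once. */
	if (next_vm_id_wrapped) {
		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
			if (k2->avic_vm_id == vm_id)
				goto again;
		}
	}
	kvm_svm->avic_vm_id = vm_id;
	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);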
196 struct vmcb *vmcb = svm->vmcb; in avic_init_vmcb()
197 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
198 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
199 phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page)); in avic_init_vmcb()
200 phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page)); in avic_init_vmcb()
202 vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK; in avic_init_vmcb()
203 vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK; in avic_init_vmcb()
204 vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; in avic_init_vmcb()
205 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT; in avic_init_vmcb()
206 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
207 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in avic_init_vmcb()
209 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK; in avic_init_vmcb()
216 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_physical_id_entry()
221 avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page); in avic_get_physical_id_entry()
228 * AVIC hardware walks the nested page table to check permissions,
234 static int avic_update_access_page(struct kvm *kvm, bool activate) in avic_update_access_page() argument
238 mutex_lock(&kvm->slots_lock); in avic_update_access_page()
242 * memory region. So, we need to ensure that kvm->mm == current->mm. in avic_update_access_page()
244 if ((kvm->arch.apic_access_page_done == activate) || in avic_update_access_page()
245 (kvm->mm != current->mm)) in avic_update_access_page()
251 activate ? PAGE_SIZE : 0); in avic_update_access_page()
255 kvm->arch.apic_access_page_done = activate; in avic_update_access_page()
257 mutex_unlock(&kvm->slots_lock); in avic_update_access_page()
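Assembled, the avic_update_access_page() hits amount to: take slots_lock, bail out if the APIC access page is already in the requested state or if the caller's mm is not the VM's own, then size the private APIC-access memslot to PAGE_SIZE (activate) or 0 (deactivate). A sketch of the full flow (the error-handling convention of __x86_set_memory_region() differs across kernel versions):

static int avic_update_access_page(struct kvm *kvm, bool activate)
{
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	/*
	 * During kvm_destroy_vm(), an APICv mode change can update this
	 * memory region. So, we need to ensure that kvm->mm == current->mm.
	 */
	if ((kvm->arch.apic_access_page_done == activate) ||
	    (kvm->mm != current->mm))
		goto out;

	ret = __x86_set_memory_region(kvm,
				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE,
				      activate ? PAGE_SIZE : 0);
	if (ret)
		goto out;

	kvm->arch.apic_access_page_done = activate;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}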
264 int id = vcpu->vcpu_id; in avic_init_backing_page()
268 return -EINVAL; in avic_init_backing_page()
270 if (!svm->vcpu.arch.apic->regs) in avic_init_backing_page()
271 return -EINVAL; in avic_init_backing_page()
273 if (kvm_apicv_activated(vcpu->kvm)) { in avic_init_backing_page()
276 ret = avic_update_access_page(vcpu->kvm, true); in avic_init_backing_page()
281 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); in avic_init_backing_page()
286 return -EINVAL; in avic_init_backing_page()
288 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
293 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
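The truncated new_entry assignment above builds the 64-bit physical APIC ID table entry for this vCPU: the (possibly SME-encrypted) backing-page address masked into the backing-page field, plus the valid bit. In full it reads roughly:

	/* Set the AVIC backing page address in the physical APIC ID table. */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			       AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;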
300 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
301 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
302 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
303 u32 index = svm->vmcb->control.exit_info_2 & 0xFF; in avic_incomplete_ipi_interception()
304 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
306 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
314 * the Trigger Mode is edge-triggered. The hardware in avic_incomplete_ipi_interception()
319 * a #VMEXIT, which needs to be emulated. in avic_incomplete_ipi_interception()
327 struct kvm *kvm = svm->vcpu.kvm; in avic_incomplete_ipi_interception()
328 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
333 * vcpus. So, we just need to kick the appropriate vcpu. in avic_incomplete_ipi_interception()
348 index, svm->vcpu.vcpu_id, icrh, icrl); in avic_incomplete_ipi_interception()
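The handler dispatches on the failure reason that hardware reports in the upper half of EXITINFO2. A condensed sketch of that switch (helper names such as avic_vcpu_is_running() follow the ones used elsewhere in the SVM code; exact macro names vary by version):

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/*
		 * AVIC only accelerates fixed-delivery, edge-triggered
		 * IPIs; everything else causes this #VMEXIT and is
		 * emulated through the normal local APIC ICR writes.
		 */
		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
		int i;
		struct kvm_vcpu *vcpu;
		struct kvm *kvm = svm->vcpu.kvm;

		/*
		 * The HW has already set the IRR bits on the target
		 * vcpus; kick every destination match that is not
		 * currently running.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!avic_vcpu_is_running(vcpu) &&
			    kvm_apic_match_dest(vcpu, apic,
						icrl & APIC_SHORT_MASK,
						GET_APIC_DEST_FIELD(icrh),
						icrl & APIC_DEST_MASK))
				kvm_vcpu_wake_up(vcpu);
		}
		break;
	}
	case AVIC_IPI_FAILURE_INVALID_TARGET:
		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
			  index, svm->vcpu.vcpu_id, icrh, icrl);
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
		break;
	default:
		pr_err("Unknown IPI interception\n");
	}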
362 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_logical_id_entry()
371 index = ffs(dlid) - 1; in avic_get_logical_id_entry()
376 int apic = ffs(dlid & 0x0f) - 1; in avic_get_logical_id_entry()
384 logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page); in avic_get_logical_id_entry()
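The two ffs() hits implement the index math for the logical APIC ID table: in flat mode the 8-bit destination is a one-hot mask (index = bit position, at most 8 entries), while in cluster mode bits [7:4] select a cluster of four entries and bits [3:0] one-hot select the APIC within it. Roughly:

	if (flat) { /* flat mode */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else { /* cluster mode */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if ((apic < 0) || (apic > 7) || (cluster >= 0xf))
			return NULL;
		index = (cluster << 2) + apic;
	}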
394 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT; in avic_ldr_write()
397 return -EINVAL; in avic_ldr_write()
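avic_ldr_write() itself just points the located logical ID table entry at the guest physical APIC ID and marks it valid; the part elided between these two hits is roughly:

	new_entry = READ_ONCE(*entry);
	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);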
411 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
412 u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
422 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR); in avic_handle_ldr_update()
423 u32 id = kvm_xapic_id(vcpu->arch.apic); in avic_handle_ldr_update()
425 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
434 svm->ldr_reg = ldr; in avic_handle_ldr_update()
443 u32 id = kvm_xapic_id(vcpu->arch.apic); in avic_handle_apic_id_update()
445 if (vcpu->vcpu_id == id) in avic_handle_apic_id_update()
448 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id); in avic_handle_apic_id_update()
453 /* We need to move the physical_id_entry to the new offset */ in avic_handle_apic_id_update()
456 to_svm(vcpu)->avic_physical_id_cache = new; in avic_handle_apic_id_update()
462 if (svm->ldr_reg) in avic_handle_apic_id_update()
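Changing the xAPIC ID means the vCPU's valid entry must migrate to the slot for the new ID, with the cached pointer following it, and the logical table refreshed if an LDR was already programmed. Approximately (with svm = to_svm(vcpu), and old/new being the table entries looked up above):

	if (!new || !old)
		return 1;

	/* Move the physical_id_entry to the new offset. */
	*new = *old;
	*old = 0ULL;
	to_svm(vcpu)->avic_physical_id_cache = new;

	/*
	 * Also update the guest physical APIC ID in the logical
	 * APIC ID table entry, if the LDR has already been set up.
	 */
	if (svm->ldr_reg)
		avic_handle_ldr_update(vcpu);

	return 0;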
471 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR); in avic_handle_dfr_update()
473 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
477 svm->dfr_reg = dfr; in avic_handle_dfr_update()
482 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_unaccel_trap_write()
483 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
488 if (avic_handle_apic_id_update(&svm->vcpu)) in avic_unaccel_trap_write()
492 if (avic_handle_ldr_update(&svm->vcpu)) in avic_unaccel_trap_write()
496 avic_handle_dfr_update(&svm->vcpu); in avic_unaccel_trap_write()
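The three handle_* calls above sit in a switch on the trapped register offset; a handler that rejects the update stops the write, and otherwise the write is replayed through the common local APIC register emulation. Roughly:

	switch (offset) {
	case APIC_ID:
		if (avic_handle_apic_id_update(&svm->vcpu))
			return 0;
		break;
	case APIC_LDR:
		if (avic_handle_ldr_update(&svm->vcpu))
			return 0;
		break;
	case APIC_DFR:
		avic_handle_dfr_update(&svm->vcpu);
		break;
	default:
		break;
	}

	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
	return 1;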
539 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
541 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
543 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
547 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset, in avic_unaccelerated_access_interception()
555 ret = kvm_emulate_instruction(&svm->vcpu, 0); in avic_unaccelerated_access_interception()
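avic_unaccelerated_access_interception() decides between the two unaccelerated flavors: a trap (the write has already retired in hardware, only side effects remain) goes to avic_unaccel_trap_write(), while a fault re-runs the access through the instruction emulator, as the kvm_emulate_instruction() hit shows. Roughly:

	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* Handling trap: the write has retired; apply side effects. */
		WARN_ONCE(!write, "svm: Handling trap read.\n");
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* Handling fault: re-execute the access via the emulator. */
		ret = kvm_emulate_instruction(&svm->vcpu, 0);
	}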
564 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
566 if (!avic || !irqchip_in_kernel(vcpu->kvm)) in avic_init_vcpu()
569 ret = avic_init_backing_page(&svm->vcpu); in avic_init_vcpu()
573 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
574 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
575 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
588 void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate) in svm_toggle_avic_for_irq_window() argument
593 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in svm_toggle_avic_for_irq_window()
594 kvm_request_apicv_update(vcpu->kvm, activate, in svm_toggle_avic_for_irq_window()
596 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in svm_toggle_avic_for_irq_window()
612 static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) in svm_set_pi_irte_mode() argument
619 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in svm_set_pi_irte_mode()
623 * Here, we go through the per-vcpu ir_list to update all existing in svm_set_pi_irte_mode()
626 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_set_pi_irte_mode()
628 if (list_empty(&svm->ir_list)) in svm_set_pi_irte_mode()
631 list_for_each_entry(ir, &svm->ir_list, node) { in svm_set_pi_irte_mode()
632 if (activate) in svm_set_pi_irte_mode()
633 ret = amd_iommu_activate_guest_mode(ir->data); in svm_set_pi_irte_mode()
635 ret = amd_iommu_deactivate_guest_mode(ir->data); in svm_set_pi_irte_mode()
640 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_set_pi_irte_mode()
647 struct vmcb *vmcb = svm->vmcb; in svm_refresh_apicv_exec_ctrl()
658 * we need to check and update the AVIC logical APIC ID table in svm_refresh_apicv_exec_ctrl()
659 * accordingly before re-activating. in svm_refresh_apicv_exec_ctrl()
662 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in svm_refresh_apicv_exec_ctrl()
664 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK; in svm_refresh_apicv_exec_ctrl()
678 if (!vcpu->arch.apicv_active) in svm_deliver_avic_intr()
679 return -1; in svm_deliver_avic_intr()
681 kvm_lapic_set_irr(vec, vcpu->arch.apic); in svm_deliver_avic_intr()
685 int cpuid = vcpu->cpu; in svm_deliver_avic_intr()
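Once the IRR bit is set, delivery depends on whether the target is running: a running vCPU on another CPU gets a write to the AVIC doorbell MSR so hardware injects the interrupt directly, while a non-running vCPU is woken the ordinary way. Roughly (SVM_AVIC_DOORBELL and avic_vcpu_is_running() are defined elsewhere in the SVM code):

	kvm_lapic_set_irr(vec, vcpu->arch.apic);
	smp_mb__after_atomic();

	if (avic_vcpu_is_running(vcpu)) {
		int cpuid = vcpu->cpu;

		/* Ring the doorbell only if the target runs on another CPU. */
		if (cpuid != get_cpu())
			wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
		put_cpu();
	} else {
		kvm_vcpu_wake_up(vcpu);
	}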
706 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
707 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
708 if (cur->data != pi->ir_data) in svm_ir_list_del()
710 list_del(&cur->node); in svm_ir_list_del()
714 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
724 * In some cases, the existing irte is updated and re-set, in svm_ir_list_add()
725 * so we need to check here if it's already been added in svm_ir_list_add()
726 * to the ir_list. in svm_ir_list_add()
728 if (pi->ir_data && (pi->prev_ga_tag != 0)) { in svm_ir_list_add()
729 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
730 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag); in svm_ir_list_add()
735 ret = -EINVAL; in svm_ir_list_add()
745 * add to the per-vcpu ir_list. in svm_ir_list_add()
749 ret = -ENOMEM; in svm_ir_list_add()
752 ir->data = pi->ir_data; in svm_ir_list_add()
754 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
755 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
756 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
764 * interrupts to a vCPU. So, we still use legacy interrupt
767 * For lowest-priority interrupts, we only support
770 * irqbalance to make the interrupts single-CPU.
785 return -1; in get_pi_vcpu_info()
791 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
792 vcpu_info->vector = irq.vector; in get_pi_vcpu_info()
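Putting the get_pi_vcpu_info() hits together: translate the routing entry into an MSI, insist on a single postable destination vCPU (falling back to legacy interrupt remapping otherwise), and export that vCPU's AVIC backing page as the posted-interrupt descriptor. A sketch:

static int get_pi_vcpu_info(struct kvm *kvm,
			    struct kvm_kernel_irq_routing_entry *e,
			    struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	/* Multicast/broadcast and non-postable interrupts stay on legacy remapping. */
	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
	    !kvm_irq_is_postable(&irq))
		return -1;

	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
	vcpu_info->vector = irq.vector;

	return 0;
}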
798 * svm_update_pi_irte - set IRTE for Posted-Interrupts
811 int idx, ret = -EINVAL; in svm_update_pi_irte()
820 idx = srcu_read_lock(&kvm->irq_srcu); in svm_update_pi_irte()
821 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in svm_update_pi_irte()
822 WARN_ON(guest_irq >= irq_rt->nr_rt_entries); in svm_update_pi_irte()
824 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { in svm_update_pi_irte()
828 if (e->type != KVM_IRQ_ROUTING_MSI) in svm_update_pi_irte()
833 * 1. When we cannot target the interrupt to a specific vcpu. in svm_update_pi_irte()
839 kvm_vcpu_apicv_active(&svm->vcpu)) { in svm_update_pi_irte()
842 /* Try to enable guest_mode in IRTE */ in svm_update_pi_irte()
843 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in svm_update_pi_irte()
845 pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, in svm_update_pi_irte()
846 svm->vcpu.vcpu_id); in svm_update_pi_irte()
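With a postable, single-vCPU MSI and APICv active, the amd_iommu_pi_data descriptor is filled in and handed to the IOMMU via irq_set_vcpu_affinity(); on success, the remapping data is linked into the per-vcpu ir_list (see the svm_ir_list_add() hits above). Roughly:

	struct amd_iommu_pi_data pi;

	/* Try to enable guest_mode in the IRTE. */
	pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
			    AVIC_HPA_MASK);
	pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
			       svm->vcpu.vcpu_id);
	pi.is_guest_mode = true;
	pi.vcpu_data = &vcpu_info;
	ret = irq_set_vcpu_affinity(host_irq, &pi);

	/*
	 * The IRTE is now in IOMMU guest mode; store the remapping
	 * data in the per-vcpu ir_list so later vCPU moves can update
	 * it through amd_iommu_update_ga().
	 */
	if (!ret && pi.is_guest_mode)
		svm_ir_list_add(svm, &pi);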
853 * IOMMU guest mode. Now, we need to store the posted in svm_update_pi_irte()
854 * interrupt information in a per-vcpu ir_list so that in svm_update_pi_irte()
855 * we can refer to them directly when we update vcpu in svm_update_pi_irte()
865 * Here, pi is used to: in svm_update_pi_irte()
866 * - Tell the IOMMU to use legacy mode for this interrupt. in svm_update_pi_irte()
867 * - Retrieve the ga_tag of the prior interrupt remapping data. in svm_update_pi_irte()
876 * was cached. If so, we need to clean up the per-vcpu in svm_update_pi_irte()
890 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in svm_update_pi_irte()
891 e->gsi, vcpu_info.vector, in svm_update_pi_irte()
896 pr_err("%s: failed to update PI IRTE\n", __func__); in svm_update_pi_irte()
903 srcu_read_unlock(&kvm->irq_srcu, idx); in svm_update_pi_irte()
919 void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate) in svm_pre_update_apicv_exec_ctrl() argument
921 avic_update_access_page(kvm, activate); in svm_pre_update_apicv_exec_ctrl()
932 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in avic_update_iommu_vcpu_affinity()
936 * Here, we go through the per-vcpu ir_list to update all existing in avic_update_iommu_vcpu_affinity()
939 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
941 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
944 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
945 ret = amd_iommu_update_ga(cpu, r, ir->data); in avic_update_iommu_vcpu_affinity()
950 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
971 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
978 if (svm->avic_is_running) in avic_vcpu_load()
981 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
983 svm->avic_is_running); in avic_vcpu_load()
994 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
996 avic_update_iommu_vcpu_affinity(vcpu, -1, 0); in avic_vcpu_put()
999 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
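The load/put pair maintains the IS_RUNNING bit and host physical APIC ID in the cached physical ID table entry, mirroring each change to the IOMMU so posted interrupts chase the vCPU across CPUs. A sketch of the two updates:

	/* avic_vcpu_load(): publish the new host CPU and set IS_RUNNING. */
	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	if (svm->avic_is_running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
					svm->avic_is_running);

	/* avic_vcpu_put(): tell the IOMMU first, then clear IS_RUNNING. */
	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
		avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);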
1009 svm->avic_is_running = is_run; in avic_set_running()
1011 avic_vcpu_load(vcpu, vcpu->cpu); in avic_set_running()