Lines matched in arch/powerpc/kvm/book3s_hv.c (KVM HV support for Book3S / POWER processors)
1 // SPDX-License-Identifier: GPL-2.0-only
9 * Kevin Wolf <mail@kevin-wolf.de>
32 #include <linux/page-flags.h>
48 #include <asm/ppc-opcode.h>
49 #include <asm/asm-prototypes.h>
69 #include <asm/pnv-pci.h>
100 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4,…
107 MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
133 return kvm->arch.nested_enable && kvm_is_radix(kvm); in nesting_enabled()
174 vcpu = READ_ONCE(vc->runnable_threads[i]); in next_runnable_thread()
185 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
219 if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
238 ++vcpu->stat.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
240 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
245 cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
263 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
277 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
287 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
288 vc->preempt_tb = mftb(); in kvmppc_core_start_stolen()
289 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
296 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
297 if (vc->preempt_tb != TB_NIL) { in kvmppc_core_end_stolen()
298 vc->stolen_tb += mftb() - vc->preempt_tb; in kvmppc_core_end_stolen()
299 vc->preempt_tb = TB_NIL; in kvmppc_core_end_stolen()
301 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
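The two helpers above accumulate a vcore's stolen time: kvmppc_core_start_stolen() timestamps the moment the vcore is preempted, and kvmppc_core_end_stolen() folds the elapsed time into vc->stolen_tb under vc->stoltb_lock. Below is a minimal, self-contained userspace sketch of that same pattern, with clock_gettime() standing in for mftb() and a pthread mutex standing in for the stoltb_lock; it is an illustration of the accounting scheme, not the kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TB_NIL (~(uint64_t)0)            /* "no preemption in progress" marker */

struct vcore_stolen {
	pthread_mutex_t lock;            /* plays the role of vc->stoltb_lock */
	uint64_t stolen_ns;              /* plays the role of vc->stolen_tb   */
	uint64_t preempt_ns;             /* plays the role of vc->preempt_tb  */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void start_stolen(struct vcore_stolen *vc)
{
	pthread_mutex_lock(&vc->lock);
	vc->preempt_ns = now_ns();       /* preemption begins: remember when */
	pthread_mutex_unlock(&vc->lock);
}

static void end_stolen(struct vcore_stolen *vc)
{
	pthread_mutex_lock(&vc->lock);
	if (vc->preempt_ns != TB_NIL) {  /* only if a preemption was in progress */
		vc->stolen_ns += now_ns() - vc->preempt_ns;
		vc->preempt_ns = TB_NIL;
	}
	pthread_mutex_unlock(&vc->lock);
}

int main(void)
{
	struct vcore_stolen vc = { .stolen_ns = 0, .preempt_ns = TB_NIL };

	pthread_mutex_init(&vc.lock, NULL);
	start_stolen(&vc);
	/* ... the vcore's runner is off the CPU here ... */
	end_stolen(&vc);
	printf("stolen so far: %llu ns\n", (unsigned long long)vc.stolen_ns);
	pthread_mutex_destroy(&vc.lock);
	return 0;
}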
306 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
310 * We can test vc->runner without taking the vcore lock, in kvmppc_core_vcpu_load_hv()
311 * because only this task ever sets vc->runner to this in kvmppc_core_vcpu_load_hv()
315 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
318 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
319 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
320 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
321 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
322 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
324 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
329 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
332 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
335 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
336 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
337 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
338 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
343 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
352 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
387 return -EINVAL; in kvmppc_set_arch_compat()
393 return -EINVAL; in kvmppc_set_arch_compat()
395 spin_lock(&vc->lock); in kvmppc_set_arch_compat()
396 vc->arch_compat = arch_compat; in kvmppc_set_arch_compat()
401 vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; in kvmppc_set_arch_compat()
402 spin_unlock(&vc->lock); in kvmppc_set_arch_compat()
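kvmppc_set_arch_compat() above derives the guest PCR value from (host_pcr_bit - guest_pcr_bit) | PCR_MASK. Assuming, as that expression implies, that each architecture level owns a single PCR bit and newer levels use higher powers of two, the subtraction sets exactly the bits for every level from the requested compat level up to (but not including) the host's native level. A tiny demonstration of that bit identity, with made-up bit assignments:

#include <stdio.h>

int main(void)
{
	unsigned int guest_pcr_bit = 0x2;   /* bit for the requested compat level (example) */
	unsigned int host_pcr_bit  = 0x10;  /* bit for the host's native level (example)    */

	/* 0x10 - 0x2 = 0xe: the 0x2, 0x4 and 0x8 bits are all set */
	printf("compat-disable bits = 0x%x\n", host_pcr_bit - guest_pcr_bit);
	return 0;
}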
411 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
413 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
419 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
421 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
423 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
425 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
427 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
428 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
430 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
431 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
432 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
434 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
436 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
437 vcpu->arch.last_inst); in kvmppc_dump_regs()
447 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; in init_vpa()
448 vpa->yield_count = cpu_to_be32(1); in init_vpa()
455 if (addr & (L1_CACHE_BYTES - 1)) in set_vpa()
456 return -EINVAL; in set_vpa()
457 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
458 if (v->next_gpa != addr || v->len != len) { in set_vpa()
459 v->next_gpa = addr; in set_vpa()
460 v->len = addr ? len : 0; in set_vpa()
461 v->update_pending = 1; in set_vpa()
463 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
467 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
478 if (vpap->update_pending) in vpa_is_registered()
479 return vpap->next_gpa != 0; in vpa_is_registered()
480 return vpap->pinned_addr != NULL; in vpa_is_registered()
487 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
502 /* Registering new area - address must be cache-line aligned */ in do_h_register_vpa()
503 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) in do_h_register_vpa()
511 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); in do_h_register_vpa()
513 len = be32_to_cpu(((struct reg_vpa *)va)->length.word); in do_h_register_vpa()
526 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
539 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
546 len -= len % sizeof(struct dtl_entry); in do_h_register_vpa()
550 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
553 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
560 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
563 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
570 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
571 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
574 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
579 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
584 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
590 vpap->next_gpa = vpa; in do_h_register_vpa()
591 vpap->len = len; in do_h_register_vpa()
592 vpap->update_pending = 1; in do_h_register_vpa()
595 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
602 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
608 * We need to pin the page pointed to by vpap->next_gpa, in kvmppc_update_vpa()
616 gpa = vpap->next_gpa; in kvmppc_update_vpa()
617 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
622 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
623 if (gpa == vpap->next_gpa) in kvmppc_update_vpa()
630 vpap->update_pending = 0; in kvmppc_update_vpa()
631 if (va && nb < vpap->len) { in kvmppc_update_vpa()
640 if (vpap->pinned_addr) in kvmppc_update_vpa()
641 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, in kvmppc_update_vpa()
642 vpap->dirty); in kvmppc_update_vpa()
643 vpap->gpa = gpa; in kvmppc_update_vpa()
644 vpap->pinned_addr = va; in kvmppc_update_vpa()
645 vpap->dirty = false; in kvmppc_update_vpa()
647 vpap->pinned_end = va + vpap->len; in kvmppc_update_vpa()
652 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
653 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
654 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
657 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
658 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
659 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
660 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
661 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
663 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
664 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
665 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
666 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
668 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
669 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
670 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
682 spin_lock_irqsave(&vc->stoltb_lock, flags); in vcore_stolen_time()
683 p = vc->stolen_tb; in vcore_stolen_time()
684 if (vc->vcore_state != VCORE_INACTIVE && in vcore_stolen_time()
685 vc->preempt_tb != TB_NIL) in vcore_stolen_time()
686 p += now - vc->preempt_tb; in vcore_stolen_time()
687 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in vcore_stolen_time()
701 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
702 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
705 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
706 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
707 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
708 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
709 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
710 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
714 dt->dispatch_reason = 7; in kvmppc_create_dtl_entry()
715 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
716 dt->timebase = cpu_to_be64(now + vc->tb_offset); in kvmppc_create_dtl_entry()
717 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); in kvmppc_create_dtl_entry()
718 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in kvmppc_create_dtl_entry()
719 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
721 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
722 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
723 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
724 /* order writing *dt vs. writing vpa->dtl_idx */ in kvmppc_create_dtl_entry()
726 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
727 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
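kvmppc_create_dtl_entry() above appends a dispatch trace log entry to a ring buffer and only afterwards bumps the index the guest reads (vpa->dtl_idx), with a barrier in between so a reader that observes the new index also observes the entry contents. Below is a minimal userspace sketch of that publish pattern, using a C11 release store in place of the kernel's write barrier; the structure layout, ring size, and overrun handling are deliberately simplified and are not the real DTL format.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DTL_ENTRIES 16                   /* illustrative ring size */

struct dtl_entry_demo {                  /* simplified stand-in for struct dtl_entry */
	uint64_t timebase;
	uint32_t enqueue_to_dispatch_time;
	uint16_t processor_id;
};

static struct dtl_entry_demo dtl_ring[DTL_ENTRIES];
static _Atomic uint64_t dtl_idx;         /* plays the role of vpa->dtl_idx */

/* producer: fill the slot first, then publish the new index */
static void dtl_append(const struct dtl_entry_demo *src)
{
	uint64_t i = atomic_load_explicit(&dtl_idx, memory_order_relaxed);

	dtl_ring[i % DTL_ENTRIES] = *src;                /* 1. write the entry   */
	atomic_store_explicit(&dtl_idx, i + 1,           /* 2. then bump the idx */
			      memory_order_release);
}

/* consumer: an acquire load of the index orders the later reads of the entry */
static int dtl_read_latest(struct dtl_entry_demo *out)
{
	uint64_t i = atomic_load_explicit(&dtl_idx, memory_order_acquire);

	if (i == 0)
		return 0;                                /* nothing logged yet */
	*out = dtl_ring[(i - 1) % DTL_ENTRIES];          /* ignores overruns   */
	return 1;
}

int main(void)
{
	struct dtl_entry_demo e = { .timebase = 12345, .processor_id = 3 }, back;

	dtl_append(&e);
	if (dtl_read_latest(&back))
		printf("idx=%llu tb=%llu cpu=%u\n",
		       (unsigned long long)atomic_load(&dtl_idx),
		       (unsigned long long)back.timebase,
		       (unsigned int)back.processor_id);
	return 0;
}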
736 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
739 * Ensure that the read of vcore->dpdes comes after the read in kvmppc_doorbell_pending()
740 * of vcpu->doorbell_request. This barrier matches the in kvmppc_doorbell_pending()
744 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
745 thr = vcpu->vcpu_id - vc->first_vcpuid; in kvmppc_doorbell_pending()
746 return !!(vc->dpdes & (1 << thr)); in kvmppc_doorbell_pending()
751 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
753 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
774 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
785 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
786 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
798 /* Copy guest memory in place - must reside within a single memslot */
810 return -EFAULT; in kvmppc_copy_guest()
811 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) in kvmppc_copy_guest()
813 return -EINVAL; in kvmppc_copy_guest()
816 return -EFAULT; in kvmppc_copy_guest()
817 from_addr |= (from & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
822 return -EFAULT; in kvmppc_copy_guest()
823 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) in kvmppc_copy_guest()
825 return -EINVAL; in kvmppc_copy_guest()
828 return -EFAULT; in kvmppc_copy_guest()
829 to_addr |= (to & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
835 return -EFAULT; in kvmppc_copy_guest()
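kvmppc_copy_guest() above requires both the source and the destination range to sit entirely inside a single memslot before doing the in-place copy. A small sketch of that containment check; the page shift and slot geometry are chosen purely for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* 4 KiB pages, chosen only for the example */

struct memslot_demo {
	uint64_t base_gfn;               /* first guest frame number in the slot */
	uint64_t npages;                 /* number of pages in the slot          */
};

static bool range_in_memslot(const struct memslot_demo *slot,
			     uint64_t gpa, uint64_t len)
{
	uint64_t start = slot->base_gfn << PAGE_SHIFT;
	uint64_t end   = (slot->base_gfn + slot->npages) << PAGE_SHIFT;

	return gpa >= start && gpa < end && len <= end - gpa;
}

int main(void)
{
	struct memslot_demo slot = { .base_gfn = 0x100, .npages = 16 };

	printf("%d\n", range_in_memslot(&slot, 0x100000, 4096)); /* 1: fits       */
	printf("%d\n", range_in_memslot(&slot, 0x10f000, 8192)); /* 0: spills out */
	return 0;
}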
844 u64 pg_mask = SZ_4K - 1; in kvmppc_h_page_init()
858 ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); in kvmppc_h_page_init()
862 ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); in kvmppc_h_page_init()
874 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
884 spin_lock(&vcore->lock); in kvm_arch_vcpu_yield_to()
885 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
886 vcore->vcore_state != VCORE_INACTIVE && in kvm_arch_vcpu_yield_to()
887 vcore->runner) in kvm_arch_vcpu_yield_to()
888 target = vcore->runner; in kvm_arch_vcpu_yield_to()
889 spin_unlock(&vcore->lock); in kvm_arch_vcpu_yield_to()
899 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
900 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
902 yield_count = be32_to_cpu(lppaca->yield_count); in kvmppc_get_yield_count()
903 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
916 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
924 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
929 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
931 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
936 if (target == -1) in kvmppc_pseries_do_hcall()
938 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
954 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
957 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_pseries_do_hcall()
959 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_pseries_do_hcall()
961 if (rc == -ENOENT) in kvmppc_pseries_do_hcall()
1040 if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4])) in kvmppc_pseries_do_hcall()
1046 if (nesting_enabled(vcpu->kvm)) in kvmppc_pseries_do_hcall()
1051 if (!nesting_enabled(vcpu->kvm)) in kvmppc_pseries_do_hcall()
1056 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1057 return -EINTR; in kvmppc_pseries_do_hcall()
1060 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1066 if (nesting_enabled(vcpu->kvm)) in kvmppc_pseries_do_hcall()
1071 if (nesting_enabled(vcpu->kvm)) in kvmppc_pseries_do_hcall()
1082 ret = kvmppc_h_svm_page_in(vcpu->kvm, in kvmppc_pseries_do_hcall()
1090 ret = kvmppc_h_svm_page_out(vcpu->kvm, in kvmppc_pseries_do_hcall()
1098 ret = kvmppc_h_svm_init_start(vcpu->kvm); in kvmppc_pseries_do_hcall()
1103 ret = kvmppc_h_svm_init_done(vcpu->kvm); in kvmppc_pseries_do_hcall()
1110 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1113 ret = kvmppc_h_svm_init_abort(vcpu->kvm); in kvmppc_pseries_do_hcall()
1120 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1126 * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
1132 vcpu->arch.shregs.msr |= MSR_EE; in kvmppc_nested_cede()
1133 vcpu->arch.ceded = 1; in kvmppc_nested_cede()
1135 if (vcpu->arch.prodded) { in kvmppc_nested_cede()
1136 vcpu->arch.prodded = 0; in kvmppc_nested_cede()
1138 vcpu->arch.ceded = 0; in kvmppc_nested_cede()
1164 /* See if it's in the real-mode table */ in kvmppc_hcall_impl_hv()
1182 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in kvmppc_emulate_debug_inst()
1183 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1201 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1203 cpu = vcpu->vcpu_id & ~(nthreads - 1); in kvmppc_read_dpdes()
1205 v = kvmppc_find_vcpu(vcpu->kvm, cpu); in kvmppc_read_dpdes()
1211 * which will update its vcore->dpdes value. in kvmppc_read_dpdes()
1213 pcpu = READ_ONCE(v->cpu); in kvmppc_read_dpdes()
1223 * On POWER9, emulate doorbell-related instructions in order to
1224 * give the guest the illusion of running on a multi-threaded core.
1232 struct kvm *kvm = vcpu->kvm; in kvmppc_emulate_doorbell_instr()
1240 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1247 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1249 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); in kvmppc_emulate_doorbell_instr()
1252 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1253 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1261 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1262 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
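The doorbell emulation above locates the target of a msgsndp purely by arithmetic on vcpu IDs: the low bits (below emul_smt_mode, which the code requires to be a power of two) select the thread within the emulated core, and the remaining bits select the core. A short worked example of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int emul_smt_mode = 4;        /* emulated threads per core (example) */
	unsigned int vcpu_id = 6;              /* this vcpu (example)                 */
	unsigned int arg = 3;                  /* msgsndp target thread (example)     */

	unsigned int thr   = vcpu_id & (emul_smt_mode - 1);   /* thread within core  */
	unsigned int base  = vcpu_id & ~(emul_smt_mode - 1);  /* first vcpu of core  */
	unsigned int tgt   = vcpu_id - thr + arg;              /* sibling being poked */
	unsigned int dpdes = 1u << thr;                        /* this thread's DPDES bit */

	/* prints: thr=2 core_base=4 target_vcpu=7 dpdes_bit=0x4 */
	printf("thr=%u core_base=%u target_vcpu=%u dpdes_bit=0x%x\n",
	       thr, base, tgt, dpdes);
	return 0;
}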
1287 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_hv()
1290 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
1295 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_exit_hv()
1300 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1303 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1304 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1306 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_handle_exit_hv()
1307 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1310 run->exit_reason = KVM_EXIT_UNKNOWN; in kvmppc_handle_exit_hv()
1311 run->ready_for_interrupt_injection = 1; in kvmppc_handle_exit_hv()
1312 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1313 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_exit_hv()
1315 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
1321 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
1332 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1340 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1341 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; in kvmppc_handle_exit_hv()
1348 run->exit_reason = KVM_EXIT_NMI; in kvmppc_handle_exit_hv()
1349 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1350 /* Clear out the old NMI status from run->flags */ in kvmppc_handle_exit_hv()
1351 run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; in kvmppc_handle_exit_hv()
1353 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1354 run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; in kvmppc_handle_exit_hv()
1356 run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; in kvmppc_handle_exit_hv()
1369 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1376 /* hcall - punt to userspace */ in kvmppc_handle_exit_hv()
1383 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
1385 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
1386 run->exit_reason = KVM_EXIT_PAPR_HCALL; in kvmppc_handle_exit_hv()
1387 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1402 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1403 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & in kvmppc_handle_exit_hv()
1405 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1406 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1417 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1418 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1419 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1420 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1421 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
1437 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && in kvmppc_handle_exit_hv()
1449 * This occurs for various TM-related instructions that in kvmppc_handle_exit_hv()
1451 * handled the cases where the guest was in real-suspend in kvmppc_handle_exit_hv()
1464 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1465 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1466 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1479 vcpu->stat.sum_exits++; in kvmppc_handle_nested_exit()
1484 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_nested_exit()
1489 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_nested_exit()
1492 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1493 vcpu->arch.shregs.msr); in kvmppc_handle_nested_exit()
1497 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1498 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_nested_exit()
1500 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
1504 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1509 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1522 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
1531 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
1533 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
1536 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
1537 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
1539 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
1540 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
1541 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
1543 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
1549 * This occurs for various TM-related instructions that in kvmppc_handle_nested_exit()
1551 * handled the cases where the guest was in real-suspend in kvmppc_handle_nested_exit()
1559 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
1578 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1579 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
1580 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1581 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1593 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
1594 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1597 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
1598 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
1599 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1600 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1604 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1612 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
1613 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1616 spin_lock(&vc->lock); in kvmppc_set_lpcr()
1618 * If ILE (interrupt little-endian) has changed, update the in kvmppc_set_lpcr()
1621 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
1626 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1629 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1631 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1637 * ILE (interrupt little-endian) and TC (translation control). in kvmppc_set_lpcr()
1650 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
1653 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); in kvmppc_set_lpcr()
1654 spin_unlock(&vc->lock); in kvmppc_set_lpcr()
1671 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1674 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1677 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1680 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1683 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1686 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1689 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1692 i = id - KVM_REG_PPC_MMCR0; in kvmppc_get_one_reg_hv()
1693 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1696 *val = get_reg_val(id, vcpu->arch.mmcr[2]); in kvmppc_get_one_reg_hv()
1699 *val = get_reg_val(id, vcpu->arch.mmcra); in kvmppc_get_one_reg_hv()
1702 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
1705 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_get_one_reg_hv()
1708 i = id - KVM_REG_PPC_PMC1; in kvmppc_get_one_reg_hv()
1709 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1712 i = id - KVM_REG_PPC_SPMC1; in kvmppc_get_one_reg_hv()
1713 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1716 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1719 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1722 *val = get_reg_val(id, vcpu->arch.sier[0]); in kvmppc_get_one_reg_hv()
1725 *val = get_reg_val(id, vcpu->arch.sier[1]); in kvmppc_get_one_reg_hv()
1728 *val = get_reg_val(id, vcpu->arch.sier[2]); in kvmppc_get_one_reg_hv()
1731 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1734 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1740 * either vcore->dpdes or doorbell_request. in kvmppc_get_one_reg_hv()
1743 *val = get_reg_val(id, vcpu->arch.vcore->dpdes | in kvmppc_get_one_reg_hv()
1744 vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
1747 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
1750 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1753 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1756 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1759 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1762 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1765 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1768 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1771 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1774 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1777 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
1780 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
1783 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1784 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1785 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1788 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1789 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1790 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1791 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1794 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1795 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1796 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1797 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1800 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1804 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1807 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1811 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1814 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1817 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1820 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_get_one_reg_hv()
1821 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1826 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_get_one_reg_hv()
1829 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1832 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1834 r = -ENXIO; in kvmppc_get_one_reg_hv()
1839 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1842 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
1845 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1848 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1851 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1854 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1857 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1860 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1864 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1866 r = -ENXIO; in kvmppc_get_one_reg_hv()
1869 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1872 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1876 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1879 *val = get_reg_val(id, vcpu->arch.dec_expires + in kvmppc_get_one_reg_hv()
1880 vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1883 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
1886 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
1889 r = -EINVAL; in kvmppc_get_one_reg_hv()
1907 r = -EINVAL; in kvmppc_set_one_reg_hv()
1910 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1913 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1916 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1919 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1922 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1925 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1928 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1931 i = id - KVM_REG_PPC_MMCR0; in kvmppc_set_one_reg_hv()
1932 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1935 vcpu->arch.mmcr[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1938 vcpu->arch.mmcra = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1941 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1944 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_set_one_reg_hv()
1947 i = id - KVM_REG_PPC_PMC1; in kvmppc_set_one_reg_hv()
1948 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1951 i = id - KVM_REG_PPC_SPMC1; in kvmppc_set_one_reg_hv()
1952 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1955 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1958 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1961 vcpu->arch.sier[0] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1964 vcpu->arch.sier[1] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1967 vcpu->arch.sier[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1970 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1973 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1976 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1979 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1982 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1985 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1988 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1990 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1991 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1994 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1997 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2000 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2003 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2006 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2009 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2012 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2015 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2019 r = -EINVAL; in kvmppc_set_one_reg_hv()
2020 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2021 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2023 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2026 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2027 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2028 r = -EINVAL; in kvmppc_set_one_reg_hv()
2029 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2031 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2034 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2035 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2036 r = -EINVAL; in kvmppc_set_one_reg_hv()
2038 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2040 len -= len % sizeof(struct dtl_entry); in kvmppc_set_one_reg_hv()
2041 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2045 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
2055 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2059 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2062 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2065 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2068 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_set_one_reg_hv()
2069 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2074 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_set_one_reg_hv()
2077 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2080 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2082 r = -ENXIO; in kvmppc_set_one_reg_hv()
2086 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2089 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2092 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2095 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2098 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2101 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2104 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2107 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2111 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2113 r = - ENXIO; in kvmppc_set_one_reg_hv()
2116 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2119 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2126 vcpu->arch.dec_expires = set_reg_val(id, *val) - in kvmppc_set_one_reg_hv()
2127 vcpu->arch.vcore->tb_offset; in kvmppc_set_one_reg_hv()
2131 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2132 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2133 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2134 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2135 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2138 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2141 r = -EINVAL; in kvmppc_set_one_reg_hv()
2157 if (kvm->arch.threads_indep) in threads_per_vcore()
2171 spin_lock_init(&vcore->lock); in kvmppc_vcore_create()
2172 spin_lock_init(&vcore->stoltb_lock); in kvmppc_vcore_create()
2173 rcuwait_init(&vcore->wait); in kvmppc_vcore_create()
2174 vcore->preempt_tb = TB_NIL; in kvmppc_vcore_create()
2175 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2176 vcore->first_vcpuid = id; in kvmppc_vcore_create()
2177 vcore->kvm = kvm; in kvmppc_vcore_create()
2178 INIT_LIST_HEAD(&vcore->preempt_list); in kvmppc_vcore_create()
2205 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open()
2210 return -ENOMEM; in debugfs_timings_open()
2212 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
2213 p->vcpu = vcpu; in debugfs_timings_open()
2214 file->private_data = p; in debugfs_timings_open()
2221 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_release()
2223 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
2231 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_read()
2232 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read()
2241 if (!p->buflen) { in debugfs_timings_read()
2242 s = p->buf; in debugfs_timings_read()
2243 buf_end = s + sizeof(p->buf); in debugfs_timings_read()
2251 count = acc->seqcount; in debugfs_timings_read()
2256 if (count == acc->seqcount) { in debugfs_timings_read()
2264 snprintf(s, buf_end - s, "%s: stuck\n", in debugfs_timings_read()
2267 snprintf(s, buf_end - s, in debugfs_timings_read()
2275 p->buflen = s - p->buf; in debugfs_timings_read()
2279 if (pos >= p->buflen) in debugfs_timings_read()
2281 if (len > p->buflen - pos) in debugfs_timings_read()
2282 len = p->buflen - pos; in debugfs_timings_read()
2283 n = copy_to_user(buf, p->buf + pos, len); in debugfs_timings_read()
2286 return -EFAULT; in debugfs_timings_read()
2287 len -= n; in debugfs_timings_read()
2296 return -EACCES; in debugfs_timings_write()
2312 struct kvm *kvm = vcpu->kvm; in debugfs_vcpu_init()
2315 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
2316 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu, in debugfs_vcpu_init()
2334 kvm = vcpu->kvm; in kvmppc_core_vcpu_create_hv()
2335 id = vcpu->vcpu_id; in kvmppc_core_vcpu_create_hv()
2337 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2344 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2346 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2349 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2350 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2353 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2354 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2355 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2356 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2365 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
2368 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2370 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2373 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2377 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2379 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2381 mutex_lock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
2383 err = -EINVAL; in kvmppc_core_vcpu_create_hv()
2385 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2389 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2393 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2396 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2405 err = -ENOMEM; in kvmppc_core_vcpu_create_hv()
2407 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2408 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2409 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2410 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2411 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2414 mutex_unlock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
2419 spin_lock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
2420 ++vcore->num_threads; in kvmppc_core_vcpu_create_hv()
2421 spin_unlock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
2422 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2423 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2424 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2425 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2427 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
2442 return -EINVAL; in kvmhv_set_smt_mode()
2444 return -EINVAL; in kvmhv_set_smt_mode()
2451 return -EINVAL; in kvmhv_set_smt_mode()
2460 mutex_lock(&kvm->lock); in kvmhv_set_smt_mode()
2461 err = -EBUSY; in kvmhv_set_smt_mode()
2462 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
2463 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
2464 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
2467 mutex_unlock(&kvm->lock); in kvmhv_set_smt_mode()
2474 if (vpa->pinned_addr) in unpin_vpa()
2475 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, in unpin_vpa()
2476 vpa->dirty); in unpin_vpa()
2481 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2482 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
2483 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
2484 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
2485 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2499 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
2505 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now); in kvmppc_set_timer()
2506 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
2507 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
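kvmppc_set_timer() above converts the remaining timebase ticks until the guest decrementer expires into nanoseconds (via tb_to_ns()) before arming an hrtimer. A minimal sketch of such a tick-to-nanosecond conversion; the 512 MHz timebase frequency is only a typical example value, the real code uses the platform's reported frequency.

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_ns(uint64_t ticks, uint64_t tb_hz)
{
	/* 128-bit intermediate (GCC/Clang extension) so ticks * 1e9 cannot overflow */
	return (uint64_t)((unsigned __int128)ticks * 1000000000ull / tb_hz);
}

int main(void)
{
	uint64_t tb_hz = 512000000ull;          /* assumed timebase frequency */

	/* 512 ticks at 512 MHz is exactly 1 microsecond */
	printf("%llu ns\n", (unsigned long long)ticks_to_ns(512, tb_hz));
	return 0;
}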
2517 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
2519 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2521 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
2522 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
2523 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
2524 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
2525 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2526 --vc->n_runnable; in kvmppc_remove_runnable()
2527 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
2538 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_grab_hwthread()
2539 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_grab_hwthread()
2540 tpaca->kvm_hstate.napping = 0; in kvmppc_grab_hwthread()
2542 tpaca->kvm_hstate.hwthread_req = 1; in kvmppc_grab_hwthread()
2554 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { in kvmppc_grab_hwthread()
2555 if (--timeout <= 0) { in kvmppc_grab_hwthread()
2557 return -EBUSY; in kvmppc_grab_hwthread()
2569 tpaca->kvm_hstate.hwthread_req = 0; in kvmppc_release_hwthread()
2570 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_release_hwthread()
2571 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_release_hwthread()
2572 tpaca->kvm_hstate.kvm_split_mode = NULL; in kvmppc_release_hwthread()
2577 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
2583 cpumask_set_cpu(cpu, &nested->need_tlb_flush); in radix_flush_cpu()
2584 cpu_in_guest = &nested->cpu_in_guest; in radix_flush_cpu()
2586 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); in radix_flush_cpu()
2587 cpu_in_guest = &kvm->arch.cpu_in_guest; in radix_flush_cpu()
2602 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
2603 struct kvm *kvm = vcpu->kvm; in kvmppc_prepare_radix_vcpu()
2610 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
2612 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
2632 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
2634 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
2642 struct kvm *kvm = vc->kvm; in kvmppc_start_thread()
2644 cpu = vc->pcpu; in kvmppc_start_thread()
2646 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
2647 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
2648 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
2650 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
2651 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
2652 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
2653 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); in kvmppc_start_thread()
2656 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
2657 tpaca->kvm_hstate.ptid = cpu - vc->pcpu; in kvmppc_start_thread()
2658 tpaca->kvm_hstate.fake_suspend = 0; in kvmppc_start_thread()
2661 tpaca->kvm_hstate.kvm_vcore = vc; in kvmppc_start_thread()
2678 * for any threads that still have a non-NULL vcore ptr. in kvmppc_wait_for_nap()
2681 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
2691 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
2697 * this core are off-line. Then grab the threads so they can't
2720 } while (--thr > 0); in on_primary_thread()
2745 spin_lock_init(&lp->lock); in init_vcore_lists()
2746 INIT_LIST_HEAD(&lp->list); in init_vcore_lists()
2754 vc->vcore_state = VCORE_PREEMPT; in kvmppc_vcore_preempt()
2755 vc->pcpu = smp_processor_id(); in kvmppc_vcore_preempt()
2756 if (vc->num_threads < threads_per_vcore(vc->kvm)) { in kvmppc_vcore_preempt()
2757 spin_lock(&lp->lock); in kvmppc_vcore_preempt()
2758 list_add_tail(&vc->preempt_list, &lp->list); in kvmppc_vcore_preempt()
2759 spin_unlock(&lp->lock); in kvmppc_vcore_preempt()
2771 if (!list_empty(&vc->preempt_list)) { in kvmppc_vcore_end_preempt()
2772 lp = &per_cpu(preempted_vcores, vc->pcpu); in kvmppc_vcore_end_preempt()
2773 spin_lock(&lp->lock); in kvmppc_vcore_end_preempt()
2774 list_del_init(&vc->preempt_list); in kvmppc_vcore_end_preempt()
2775 spin_unlock(&lp->lock); in kvmppc_vcore_end_preempt()
2777 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_end_preempt()
2793 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2794 * respectively in 2-way micro-threading (split-core) mode on POWER8.
2801 cip->n_subcores = 1; in init_core_info()
2802 cip->max_subcore_threads = vc->num_threads; in init_core_info()
2803 cip->total_threads = vc->num_threads; in init_core_info()
2804 cip->subcore_threads[0] = vc->num_threads; in init_core_info()
2805 cip->vc[0] = vc; in init_core_info()
2811 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way in subcore_config_ok()
2812 * split-core mode, with one thread per subcore. in subcore_config_ok()
2834 vc->entry_exit_map = 0; in init_vcore_to_run()
2835 vc->in_guest = 0; in init_vcore_to_run()
2836 vc->napping_threads = 0; in init_vcore_to_run()
2837 vc->conferring_threads = 0; in init_vcore_to_run()
2838 vc->tb_offset_applied = 0; in init_vcore_to_run()
2843 int n_threads = vc->num_threads; in can_dynamic_split()
2850 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) in can_dynamic_split()
2855 kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) in can_dynamic_split()
2858 if (n_threads < cip->max_subcore_threads) in can_dynamic_split()
2859 n_threads = cip->max_subcore_threads; in can_dynamic_split()
2860 if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) in can_dynamic_split()
2862 cip->max_subcore_threads = n_threads; in can_dynamic_split()
2864 sub = cip->n_subcores; in can_dynamic_split()
2865 ++cip->n_subcores; in can_dynamic_split()
2866 cip->total_threads += vc->num_threads; in can_dynamic_split()
2867 cip->subcore_threads[sub] = vc->num_threads; in can_dynamic_split()
2868 cip->vc[sub] = vc; in can_dynamic_split()
2870 list_del_init(&vc->preempt_list); in can_dynamic_split()
2882 if (cip->total_threads + pvc->num_threads > target_threads) in can_piggyback()
2894 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
2895 vcpu->arch.ret = -EINTR; in prepare_threads()
2896 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
2897 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
2898 vcpu->arch.dtl.update_pending) in prepare_threads()
2899 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
2903 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
2912 spin_lock(&lp->lock); in collect_piggybacks()
2913 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { in collect_piggybacks()
2914 if (!spin_trylock(&pvc->lock)) in collect_piggybacks()
2917 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
2918 list_del_init(&pvc->preempt_list); in collect_piggybacks()
2919 if (pvc->runner == NULL) { in collect_piggybacks()
2920 pvc->vcore_state = VCORE_INACTIVE; in collect_piggybacks()
2923 spin_unlock(&pvc->lock); in collect_piggybacks()
2927 spin_unlock(&pvc->lock); in collect_piggybacks()
2931 pvc->vcore_state = VCORE_PIGGYBACK; in collect_piggybacks()
2932 if (cip->total_threads >= target_threads) in collect_piggybacks()
2935 spin_unlock(&lp->lock); in collect_piggybacks()
2944 for (sub = 0; sub < cip->n_subcores; ++sub) { in recheck_signals_and_mmu()
2945 vc = cip->vc[sub]; in recheck_signals_and_mmu()
2946 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
2949 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
2962 spin_lock(&vc->lock); in post_guest_process()
2972 spin_unlock(&vc->lock); in post_guest_process()
2974 if (now < vcpu->arch.dec_expires && in post_guest_process()
2981 if (vcpu->arch.trap) in post_guest_process()
2983 vcpu->arch.run_task); in post_guest_process()
2985 vcpu->arch.ret = ret; in post_guest_process()
2986 vcpu->arch.trap = 0; in post_guest_process()
2988 spin_lock(&vc->lock); in post_guest_process()
2989 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
2990 if (vcpu->arch.pending_exceptions) in post_guest_process()
2992 if (vcpu->arch.ceded) in post_guest_process()
2998 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3004 } else if (vc->runner) { in post_guest_process()
3005 vc->vcore_state = VCORE_PREEMPT; in post_guest_process()
3008 vc->vcore_state = VCORE_INACTIVE; in post_guest_process()
3010 if (vc->n_runnable > 0 && vc->runner == NULL) { in post_guest_process()
3012 i = -1; in post_guest_process()
3014 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3017 spin_unlock(&vc->lock); in post_guest_process()
3037 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; in kvmppc_clear_host_core()
3058 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; in kvmppc_set_host_core()
3066 local_paca->irq_happened |= PACA_IRQ_EE; in set_irq_happened()
3069 local_paca->irq_happened |= PACA_IRQ_DBELL; in set_irq_happened()
3072 local_paca->irq_happened |= PACA_IRQ_HMI; in set_irq_happened()
3082 * Called with vc->lock held.
3109 /* if the runner is no longer runnable, let the caller pick a new one */ in kvmppc_run_core()
3110 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3117 vc->preempt_tb = TB_NIL; in kvmppc_run_core()
3124 controlled_threads = threads_per_vcore(vc->kvm); in kvmppc_run_core()
3130 * On POWER9, we need to be not in independent-threads mode if in kvmppc_run_core()
3135 !kvm_is_radix(vc->kvm); in kvmppc_run_core()
3137 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) || in kvmppc_run_core()
3138 (hpt_on_radix && vc->kvm->arch.threads_indep)) { in kvmppc_run_core()
3140 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3142 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3156 if (vc->num_threads < target_threads) in kvmppc_run_core()
3165 if (kvm_is_radix(vc->kvm)) { in kvmppc_run_core()
3172 * Hard-disable interrupts, and check resched flag and signals. in kvmppc_run_core()
3183 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
3189 spin_unlock(&pvc->lock); in kvmppc_run_core()
3198 /* Decide on micro-threading (split-core) mode */ in kvmppc_run_core()
3230 split_info.lpcr_req = vc->lpcr; in kvmppc_run_core()
3231 split_info.lpidr_req = vc->kvm->arch.lpid; in kvmppc_run_core()
3232 split_info.host_lpcr = vc->kvm->arch.host_lpcr; in kvmppc_run_core()
3244 paca->kvm_hstate.tid = thr; in kvmppc_run_core()
3245 paca->kvm_hstate.napping = 0; in kvmppc_run_core()
3246 paca->kvm_hstate.kvm_split_mode = sip; in kvmppc_run_core()
3249 /* Initiate micro-threading (split-core) on POWER8 if required */ in kvmppc_run_core()
3272 int n_online = atomic_read(&vc->online_count); in kvmppc_run_core()
3275 * Use the 8-thread value if we're doing split-core in kvmppc_run_core()
3291 pvc->pcpu = pcpu + thr; in kvmppc_run_core()
3296 if (!vcpu->arch.ptid) in kvmppc_run_core()
3298 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3315 * When doing micro-threading, poke the inactive threads as well. in kvmppc_run_core()
3328 vc->vcore_state = VCORE_RUNNING; in kvmppc_run_core()
3334 spin_unlock(&core_info.vc[sub]->lock); in kvmppc_run_core()
3338 srcu_idx = srcu_read_lock(&vc->kvm->srcu); in kvmppc_run_core()
3354 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); in kvmppc_run_core()
3358 spin_lock(&vc->lock); in kvmppc_run_core()
3360 vc->vcore_state = VCORE_EXITING; in kvmppc_run_core()
3365 /* Return to whole-core mode if we split the core earlier */ in kvmppc_run_core()
3387 while (paca->kvm_hstate.kvm_split_mode) { in kvmppc_run_core()
3404 if (sip && sip->napped[i]) in kvmppc_run_core()
3406 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); in kvmppc_run_core()
3409 spin_unlock(&vc->lock); in kvmppc_run_core()
3421 spin_lock(&vc->lock); in kvmppc_run_core()
3424 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
3429 * Load up hypervisor-mode registers on P9.
3434 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_load_hv_regs_and_go()
3449 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE); in kvmhv_load_hv_regs_and_go()
3452 hdec = time_limit - mftb(); in kvmhv_load_hv_regs_and_go()
3454 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); in kvmhv_load_hv_regs_and_go()
3460 if (vc->tb_offset) { in kvmhv_load_hv_regs_and_go()
3461 u64 new_tb = mftb() + vc->tb_offset; in kvmhv_load_hv_regs_and_go()
3466 vc->tb_offset_applied = vc->tb_offset; in kvmhv_load_hv_regs_and_go()
3469 if (vc->pcr) in kvmhv_load_hv_regs_and_go()
3470 mtspr(SPRN_PCR, vc->pcr | PCR_MASK); in kvmhv_load_hv_regs_and_go()
3471 mtspr(SPRN_DPDES, vc->dpdes); in kvmhv_load_hv_regs_and_go()
3472 mtspr(SPRN_VTB, vc->vtb); in kvmhv_load_hv_regs_and_go()
3474 local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR); in kvmhv_load_hv_regs_and_go()
3475 local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR); in kvmhv_load_hv_regs_and_go()
3476 mtspr(SPRN_PURR, vcpu->arch.purr); in kvmhv_load_hv_regs_and_go()
3477 mtspr(SPRN_SPURR, vcpu->arch.spurr); in kvmhv_load_hv_regs_and_go()
3480 mtspr(SPRN_DAWR0, vcpu->arch.dawr); in kvmhv_load_hv_regs_and_go()
3481 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); in kvmhv_load_hv_regs_and_go()
3483 mtspr(SPRN_CIABR, vcpu->arch.ciabr); in kvmhv_load_hv_regs_and_go()
3484 mtspr(SPRN_IC, vcpu->arch.ic); in kvmhv_load_hv_regs_and_go()
3485 mtspr(SPRN_PID, vcpu->arch.pid); in kvmhv_load_hv_regs_and_go()
3487 mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | in kvmhv_load_hv_regs_and_go()
3488 (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); in kvmhv_load_hv_regs_and_go()
3490 mtspr(SPRN_HFSCR, vcpu->arch.hfscr); in kvmhv_load_hv_regs_and_go()
3492 mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); in kvmhv_load_hv_regs_and_go()
3493 mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); in kvmhv_load_hv_regs_and_go()
3494 mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); in kvmhv_load_hv_regs_and_go()
3495 mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); in kvmhv_load_hv_regs_and_go()
3504 mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); in kvmhv_load_hv_regs_and_go()
3505 mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); in kvmhv_load_hv_regs_and_go()
3512 mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr + in kvmhv_load_hv_regs_and_go()
3513 purr - vcpu->arch.purr); in kvmhv_load_hv_regs_and_go()
3514 mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr + in kvmhv_load_hv_regs_and_go()
3515 spurr - vcpu->arch.spurr); in kvmhv_load_hv_regs_and_go()
3516 vcpu->arch.purr = purr; in kvmhv_load_hv_regs_and_go()
3517 vcpu->arch.spurr = spurr; in kvmhv_load_hv_regs_and_go()
3519 vcpu->arch.ic = mfspr(SPRN_IC); in kvmhv_load_hv_regs_and_go()
3520 vcpu->arch.pid = mfspr(SPRN_PID); in kvmhv_load_hv_regs_and_go()
3521 vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS; in kvmhv_load_hv_regs_and_go()
3523 vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); in kvmhv_load_hv_regs_and_go()
3524 vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); in kvmhv_load_hv_regs_and_go()
3525 vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); in kvmhv_load_hv_regs_and_go()
3526 vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); in kvmhv_load_hv_regs_and_go()
3530 (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); in kvmhv_load_hv_regs_and_go()
3544 * cp_abort is required if the processor supports local copy-paste in kvmhv_load_hv_regs_and_go()
3550 mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ in kvmhv_load_hv_regs_and_go()
3553 vc->dpdes = mfspr(SPRN_DPDES); in kvmhv_load_hv_regs_and_go()
3554 vc->vtb = mfspr(SPRN_VTB); in kvmhv_load_hv_regs_and_go()
3556 if (vc->pcr) in kvmhv_load_hv_regs_and_go()
3559 if (vc->tb_offset_applied) { in kvmhv_load_hv_regs_and_go()
3560 u64 new_tb = mftb() - vc->tb_offset_applied; in kvmhv_load_hv_regs_and_go()
3565 vc->tb_offset_applied = 0; in kvmhv_load_hv_regs_and_go()
3569 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); in kvmhv_load_hv_regs_and_go()
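
The PURR/SPURR handling above restores the host values advanced by exactly the ticks that accumulated while the guest ran, so host utilisation accounting does not lose the time spent inside the guest. A minimal standalone sketch of that delta arithmetic, using hypothetical plain variables in place of the SPRs and the saved host/guest copies:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: hypothetical stand-ins for SPRN_PURR and the saved copies. */
struct purr_acct {
	uint64_t host_purr;   /* host value saved at guest entry          */
	uint64_t guest_purr;  /* guest value written into the SPR at entry */
};

/* At guest exit, hand the ticks spent in the guest back to the host total. */
static uint64_t purr_for_host_at_exit(const struct purr_acct *a, uint64_t purr_now)
{
	/* purr_now - a->guest_purr is exactly the time accrued in the guest */
	return a->host_purr + (purr_now - a->guest_purr);
}

int main(void)
{
	struct purr_acct a = { .host_purr = 1000, .guest_purr = 50 };

	/* SPR read 300 at exit: the guest ran for 300 - 50 = 250 ticks */
	printf("host PURR to restore: %llu\n",
	       (unsigned long long)purr_for_host_at_exit(&a, 300));   /* 1250 */
	return 0;
}

Running it prints 1250: the saved host total of 1000 plus the 250 ticks the guest consumed.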
3575 * Virtual-mode guest entry for POWER9 and later when the host and
3575 * Virtual-mode guest entry for POWER9 and later when the host and guest are both using the radix MMU.
3581 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_p9_guest_entry()
3594 local_paca->kvm_hstate.dec_expires = dec + tb; in kvmhv_p9_guest_entry()
3595 if (local_paca->kvm_hstate.dec_expires < time_limit) in kvmhv_p9_guest_entry()
3596 time_limit = local_paca->kvm_hstate.dec_expires; in kvmhv_p9_guest_entry()
3598 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
3604 vc->entry_exit_map = 1; in kvmhv_p9_guest_entry()
3605 vc->in_guest = 1; in kvmhv_p9_guest_entry()
3607 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3608 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3609 u32 yield_count = be32_to_cpu(lp->yield_count) + 1; in kvmhv_p9_guest_entry()
3610 lp->yield_count = cpu_to_be32(yield_count); in kvmhv_p9_guest_entry()
3611 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
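
The block above bumps the yield count in the guest's pinned VPA; the same bump happens again on the exit path further down, so the counter's parity flips at each dispatch boundary and the area is marked dirty for migration. A small sketch of the endian-safe increment, assuming a little-endian host and a hypothetical cut-down VPA structure:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: a cut-down VPA holding just the yield counter, and a
 * little-endian host, so the big-endian field needs a byte swap. */
struct toy_vpa { uint32_t yield_count_be; };

static uint32_t bswap32(uint32_t v) { return __builtin_bswap32(v); }

/* Bump the counter at a dispatch boundary and remember the page is dirty. */
static void bump_yield_count(struct toy_vpa *vpa, int *dirty)
{
	uint32_t count = bswap32(vpa->yield_count_be) + 1;

	vpa->yield_count_be = bswap32(count);
	*dirty = 1;
}

int main(void)
{
	struct toy_vpa vpa = { .yield_count_be = bswap32(6) };
	int dirty = 0;

	bump_yield_count(&vpa, &dirty);   /* guest entry: 6 -> 7 */
	bump_yield_count(&vpa, &dirty);   /* guest exit:  7 -> 8 */
	printf("yield_count=%u dirty=%d\n", bswap32(vpa.yield_count_be), dirty);
	return 0;
}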
3616 kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
3621 load_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
3623 load_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
3625 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvmhv_p9_guest_entry()
3627 mtspr(SPRN_DSCR, vcpu->arch.dscr); in kvmhv_p9_guest_entry()
3628 mtspr(SPRN_IAMR, vcpu->arch.iamr); in kvmhv_p9_guest_entry()
3629 mtspr(SPRN_PSPB, vcpu->arch.pspb); in kvmhv_p9_guest_entry()
3630 mtspr(SPRN_FSCR, vcpu->arch.fscr); in kvmhv_p9_guest_entry()
3631 mtspr(SPRN_TAR, vcpu->arch.tar); in kvmhv_p9_guest_entry()
3632 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); in kvmhv_p9_guest_entry()
3633 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); in kvmhv_p9_guest_entry()
3634 mtspr(SPRN_BESCR, vcpu->arch.bescr); in kvmhv_p9_guest_entry()
3635 mtspr(SPRN_WORT, vcpu->arch.wort); in kvmhv_p9_guest_entry()
3636 mtspr(SPRN_TIDR, vcpu->arch.tid); in kvmhv_p9_guest_entry()
3637 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_p9_guest_entry()
3638 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_p9_guest_entry()
3639 mtspr(SPRN_AMR, vcpu->arch.amr); in kvmhv_p9_guest_entry()
3640 mtspr(SPRN_UAMOR, vcpu->arch.uamor); in kvmhv_p9_guest_entry()
3642 if (!(vcpu->arch.ctrl & 1)) in kvmhv_p9_guest_entry()
3645 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); in kvmhv_p9_guest_entry()
3659 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_p9_guest_entry()
3662 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_p9_guest_entry()
3664 if (vcpu->arch.nested) { in kvmhv_p9_guest_entry()
3665 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_p9_guest_entry()
3666 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_p9_guest_entry()
3668 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_p9_guest_entry()
3669 hvregs.vcpu_token = vcpu->vcpu_id; in kvmhv_p9_guest_entry()
3673 __pa(&vcpu->arch.regs)); in kvmhv_p9_guest_entry()
3675 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_p9_guest_entry()
3676 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_p9_guest_entry()
3677 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_p9_guest_entry()
3678 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_p9_guest_entry()
3682 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && in kvmhv_p9_guest_entry()
3692 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
3697 vcpu->arch.dec_expires = dec + tb; in kvmhv_p9_guest_entry()
3698 vcpu->cpu = -1; in kvmhv_p9_guest_entry()
3699 vcpu->arch.thread_cpu = -1; in kvmhv_p9_guest_entry()
3700 vcpu->arch.ctrl = mfspr(SPRN_CTRLF); in kvmhv_p9_guest_entry()
3702 vcpu->arch.iamr = mfspr(SPRN_IAMR); in kvmhv_p9_guest_entry()
3703 vcpu->arch.pspb = mfspr(SPRN_PSPB); in kvmhv_p9_guest_entry()
3704 vcpu->arch.fscr = mfspr(SPRN_FSCR); in kvmhv_p9_guest_entry()
3705 vcpu->arch.tar = mfspr(SPRN_TAR); in kvmhv_p9_guest_entry()
3706 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); in kvmhv_p9_guest_entry()
3707 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); in kvmhv_p9_guest_entry()
3708 vcpu->arch.bescr = mfspr(SPRN_BESCR); in kvmhv_p9_guest_entry()
3709 vcpu->arch.wort = mfspr(SPRN_WORT); in kvmhv_p9_guest_entry()
3710 vcpu->arch.tid = mfspr(SPRN_TIDR); in kvmhv_p9_guest_entry()
3711 vcpu->arch.amr = mfspr(SPRN_AMR); in kvmhv_p9_guest_entry()
3712 vcpu->arch.uamor = mfspr(SPRN_UAMOR); in kvmhv_p9_guest_entry()
3713 vcpu->arch.dscr = mfspr(SPRN_DSCR); in kvmhv_p9_guest_entry()
3723 if (host_amr != vcpu->arch.amr) in kvmhv_p9_guest_entry()
3727 store_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
3729 store_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
3731 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvmhv_p9_guest_entry()
3735 kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
3738 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3739 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3740 u32 yield_count = be32_to_cpu(lp->yield_count) + 1; in kvmhv_p9_guest_entry()
3741 lp->yield_count = cpu_to_be32(yield_count); in kvmhv_p9_guest_entry()
3742 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
3743 save_pmu = lp->pmcregs_in_use; in kvmhv_p9_guest_entry()
3746 save_pmu |= nesting_enabled(vcpu->kvm); in kvmhv_p9_guest_entry()
3750 vc->entry_exit_map = 0x101; in kvmhv_p9_guest_entry()
3751 vc->in_guest = 0; in kvmhv_p9_guest_entry()
3753 mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb()); in kvmhv_p9_guest_entry()
3754 mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso); in kvmhv_p9_guest_entry()
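
Throughout this entry/exit path the guest timer is tracked as an absolute timebase value (dec_expires), while SPRN_DEC holds a relative count-down, so each load above writes expiry minus mftb() and each save reads the DEC and adds the current timebase back. A tiny sketch of that conversion with hypothetical names and plain integers for the timebase:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: convert between an absolute expiry (timebase units) and
 * the relative value a down-counting decrementer wants. */
static int64_t dec_to_program(uint64_t dec_expires, uint64_t tb_now)
{
	/* A negative result means the timer already expired and fires at once. */
	return (int64_t)(dec_expires - tb_now);
}

static uint64_t expiry_from_dec(int64_t dec_now, uint64_t tb_now)
{
	/* Re-derive the absolute expiry after running for a while. */
	return tb_now + dec_now;
}

int main(void)
{
	uint64_t expires = 10000, tb = 9000;

	printf("program DEC = %lld\n", (long long)dec_to_program(expires, tb)); /* 1000 */
	/* 400 timebase ticks later the decrementer has counted down to 600 */
	printf("expiry back = %llu\n",
	       (unsigned long long)expiry_from_dec(600, tb + 400));            /* 10000 */
	return 0;
}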
3772 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
3773 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
3774 spin_unlock(&vc->lock); in kvmppc_wait_for_exec()
3776 spin_lock(&vc->lock); in kvmppc_wait_for_exec()
3778 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
3786 vc->halt_poll_ns *= halt_poll_ns_grow; in grow_halt_poll_ns()
3787 if (vc->halt_poll_ns < halt_poll_ns_grow_start) in grow_halt_poll_ns()
3788 vc->halt_poll_ns = halt_poll_ns_grow_start; in grow_halt_poll_ns()
3794 vc->halt_poll_ns = 0; in shrink_halt_poll_ns()
3796 vc->halt_poll_ns /= halt_poll_ns_shrink; in shrink_halt_poll_ns()
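
grow_halt_poll_ns() and shrink_halt_poll_ns() above adjust the vcore's polling window multiplicatively: growing multiplies by halt_poll_ns_grow and enforces a floor of halt_poll_ns_grow_start, shrinking divides by halt_poll_ns_shrink or zeroes the window when that factor is zero. A standalone sketch of the same arithmetic, with the module parameters as plain variables and illustrative values rather than the kernel defaults:

#include <stdio.h>

/* Sketch only: grow/shrink arithmetic with made-up parameter values. */
static unsigned int grow_factor = 2, grow_start_ns = 10000, shrink_factor = 2;

static unsigned long grow(unsigned long poll_ns)
{
	poll_ns *= grow_factor;
	if (poll_ns < grow_start_ns)
		poll_ns = grow_start_ns;      /* jump straight to the start value */
	return poll_ns;
}

static unsigned long shrink(unsigned long poll_ns)
{
	if (shrink_factor == 0)
		return 0;                     /* disable polling entirely */
	return poll_ns / shrink_factor;
}

int main(void)
{
	unsigned long p = 0;

	p = grow(p);            /* 0     -> 10000 (floor applied) */
	p = grow(p);            /* 10000 -> 20000                 */
	p = shrink(p);          /* 20000 -> 10000                 */
	printf("poll window: %lu ns\n", p);
	return 0;
}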
3804 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
3805 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
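
xive_interrupt_pending() reports a wakeup when irq_pending is set or when the saved XIVE pending priority (PIPR) is numerically lower, i.e. more favoured, than the vcpu's current priority (CPPR). A one-function sketch of that comparison with made-up priority values:

#include <stdio.h>

/* Sketch only: lower priority value = more favoured, as in XIVE. */
static int xive_deliverable(unsigned char pipr, unsigned char cppr)
{
	return pipr < cppr;
}

int main(void)
{
	printf("%d\n", xive_deliverable(4, 6));   /* 1: priority-4 irq beats CPPR 6 */
	printf("%d\n", xive_deliverable(7, 5));   /* 0: masked by the current CPPR  */
	return 0;
}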
3816 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
3825 * exceptions or are no longer ceded
3833 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcore_check_block()
3842 * or external interrupt to one of the vcpus. vc->lock is held.
3852 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
3853 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); in kvmppc_vcore_blocked()
3854 ++vc->runner->stat.halt_attempted_poll; in kvmppc_vcore_blocked()
3856 vc->vcore_state = VCORE_POLLING; in kvmppc_vcore_blocked()
3857 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
3867 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
3868 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
3871 ++vc->runner->stat.halt_successful_poll; in kvmppc_vcore_blocked()
3876 prepare_to_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
3879 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
3882 if (vc->halt_poll_ns) in kvmppc_vcore_blocked()
3883 ++vc->runner->stat.halt_successful_poll; in kvmppc_vcore_blocked()
3889 vc->vcore_state = VCORE_SLEEPING; in kvmppc_vcore_blocked()
3891 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
3893 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
3894 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
3895 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
3897 ++vc->runner->stat.halt_successful_wait; in kvmppc_vcore_blocked()
3902 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); in kvmppc_vcore_blocked()
3906 vc->runner->stat.halt_wait_ns += in kvmppc_vcore_blocked()
3907 ktime_to_ns(cur) - ktime_to_ns(start_wait); in kvmppc_vcore_blocked()
3909 if (vc->halt_poll_ns) in kvmppc_vcore_blocked()
3910 vc->runner->stat.halt_poll_fail_ns += in kvmppc_vcore_blocked()
3911 ktime_to_ns(start_wait) - in kvmppc_vcore_blocked()
3915 if (vc->halt_poll_ns) in kvmppc_vcore_blocked()
3916 vc->runner->stat.halt_poll_success_ns += in kvmppc_vcore_blocked()
3917 ktime_to_ns(cur) - in kvmppc_vcore_blocked()
3923 if (block_ns <= vc->halt_poll_ns) in kvmppc_vcore_blocked()
3926 else if (vc->halt_poll_ns && block_ns > halt_poll_ns) in kvmppc_vcore_blocked()
3929 else if (vc->halt_poll_ns < halt_poll_ns && in kvmppc_vcore_blocked()
3932 if (vc->halt_poll_ns > halt_poll_ns) in kvmppc_vcore_blocked()
3933 vc->halt_poll_ns = halt_poll_ns; in kvmppc_vcore_blocked()
3935 vc->halt_poll_ns = 0; in kvmppc_vcore_blocked()
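
The tail of kvmppc_vcore_blocked() above tunes the window from the measured block time: a block no longer than the current window leaves it alone, a block that overshoots the global halt_poll_ns cap shrinks it, a short block while the window is still small grows it, and the result is clamped to the cap (or zeroed when polling is disabled). A self-contained sketch of that policy, with doubling/halving standing in for the grow/shrink helpers and a hypothetical cap:

#include <stdio.h>

/* Sketch only: how the measured block time steers the polling window.
 * cap_ns plays the role of the global halt_poll_ns module parameter. */
static unsigned long adjust_poll_ns(unsigned long poll_ns, unsigned long block_ns,
				    unsigned long cap_ns)
{
	if (cap_ns) {
		if (block_ns <= poll_ns)
			;                              /* poll was long enough: keep it */
		else if (poll_ns && block_ns > cap_ns)
			poll_ns /= 2;                  /* blocked past the cap: shrink  */
		else if (poll_ns < cap_ns && block_ns < cap_ns)
			poll_ns = poll_ns ? poll_ns * 2 : 10000;  /* too small: grow */

		if (poll_ns > cap_ns)
			poll_ns = cap_ns;              /* never poll longer than the cap */
	} else {
		poll_ns = 0;                           /* polling disabled globally */
	}
	return poll_ns;
}

int main(void)
{
	unsigned long cap = 200000, p = 0;

	p = adjust_poll_ns(p, 150000, cap);   /* woke under the cap: grow to 10000 */
	p = adjust_poll_ns(p, 150000, cap);   /* still too small: grow to 20000    */
	p = adjust_poll_ns(p, 500000, cap);   /* long sleep past cap: shrink       */
	printf("poll window: %lu ns\n", p);   /* 10000 */
	return 0;
}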
3948 struct kvm *kvm = vcpu->kvm; in kvmhv_setup_mmu()
3950 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
3951 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
3957 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
3960 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
3966 struct kvm_run *run = vcpu->run; in kvmppc_run_vcpu()
3973 run->exit_reason = 0; in kvmppc_run_vcpu()
3974 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
3975 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
3981 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
3982 spin_lock(&vc->lock); in kvmppc_run_vcpu()
3983 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
3984 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
3985 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
3986 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
3987 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
3988 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
3989 ++vc->n_runnable; in kvmppc_run_vcpu()
3997 if ((vc->vcore_state == VCORE_PIGGYBACK || in kvmppc_run_vcpu()
3998 vc->vcore_state == VCORE_RUNNING) && in kvmppc_run_vcpu()
4003 } else if (vc->vcore_state == VCORE_SLEEPING) { in kvmppc_run_vcpu()
4004 rcuwait_wake_up(&vc->wait); in kvmppc_run_vcpu()
4009 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4012 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4013 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4015 spin_lock(&vc->lock); in kvmppc_run_vcpu()
4017 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_run_vcpu()
4018 run->fail_entry.hardware_entry_failure_reason = 0; in kvmppc_run_vcpu()
4020 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4025 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4028 if (vc->vcore_state != VCORE_INACTIVE) { in kvmppc_run_vcpu()
4034 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4036 v->stat.signal_exits++; in kvmppc_run_vcpu()
4037 v->run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4038 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4039 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4042 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4047 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4049 v->arch.ceded = 0; in kvmppc_run_vcpu()
4051 vc->runner = vcpu; in kvmppc_run_vcpu()
4052 if (n_ceded == vc->n_runnable) { in kvmppc_run_vcpu()
4057 cond_resched_lock(&vc->lock); in kvmppc_run_vcpu()
4058 if (vc->vcore_state == VCORE_PREEMPT) in kvmppc_run_vcpu()
4063 vc->runner = NULL; in kvmppc_run_vcpu()
4066 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4067 (vc->vcore_state == VCORE_RUNNING || in kvmppc_run_vcpu()
4068 vc->vcore_state == VCORE_EXITING || in kvmppc_run_vcpu()
4069 vc->vcore_state == VCORE_PIGGYBACK)) in kvmppc_run_vcpu()
4072 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4075 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4077 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
4078 run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4079 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4082 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { in kvmppc_run_vcpu()
4084 i = -1; in kvmppc_run_vcpu()
4086 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4090 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4091 return vcpu->arch.ret; in kvmppc_run_vcpu()
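
In the loop above the runner counts how many of the vcore's runnable vcpus have ceded; only when every runnable vcpu has ceded does the vcore block in kvmppc_vcore_blocked(), otherwise it (re)enters the guest. A minimal sketch of that decision over a hypothetical array of vcpu states:

#include <stdio.h>

/* Sketch only: decide whether a vcore should block or run the guest,
 * based on how many runnable vcpus have ceded (hypothetical data). */
struct toy_vcpu { int runnable; int ceded; };

static const char *vcore_action(const struct toy_vcpu *v, int n)
{
	int runnable = 0, ceded = 0;

	for (int i = 0; i < n; i++) {
		if (!v[i].runnable)
			continue;
		runnable++;
		ceded += v[i].ceded;
	}
	/* Block only when every runnable vcpu has ceded; otherwise enter. */
	return (runnable && ceded == runnable) ? "block" : "run guest";
}

int main(void)
{
	struct toy_vcpu vcpus[] = { {1, 1}, {1, 0}, {0, 0} };

	printf("%s\n", vcore_action(vcpus, 3));   /* run guest: one vcpu not ceded */
	vcpus[1].ceded = 1;
	printf("%s\n", vcore_action(vcpus, 3));   /* block: all runnable ceded     */
	return 0;
}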
4097 struct kvm_run *run = vcpu->run; in kvmhv_run_single_vcpu()
4101 struct kvm *kvm = vcpu->kvm; in kvmhv_run_single_vcpu()
4102 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4106 run->exit_reason = 0; in kvmhv_run_single_vcpu()
4107 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4108 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4110 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4111 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4112 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4113 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmhv_run_single_vcpu()
4114 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4115 vcpu->arch.busy_preempt = TB_NIL; in kvmhv_run_single_vcpu()
4116 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4117 vc->runnable_threads[0] = vcpu; in kvmhv_run_single_vcpu()
4118 vc->n_runnable = 1; in kvmhv_run_single_vcpu()
4119 vc->runner = vcpu; in kvmhv_run_single_vcpu()
4122 if (!kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4131 vc->preempt_tb = TB_NIL; in kvmhv_run_single_vcpu()
4135 vc->pcpu = pcpu; in kvmhv_run_single_vcpu()
4142 if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4147 if (vcpu->arch.doorbell_request) { in kvmhv_run_single_vcpu()
4148 vc->dpdes = 1; in kvmhv_run_single_vcpu()
4150 vcpu->arch.doorbell_request = 0; in kvmhv_run_single_vcpu()
4153 &vcpu->arch.pending_exceptions)) in kvmhv_run_single_vcpu()
4155 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4156 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4158 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4164 local_paca->kvm_hstate.tid = 0; in kvmhv_run_single_vcpu()
4165 local_paca->kvm_hstate.napping = 0; in kvmhv_run_single_vcpu()
4166 local_paca->kvm_hstate.kvm_split_mode = NULL; in kvmhv_run_single_vcpu()
4171 vc->vcore_state = VCORE_RUNNING; in kvmhv_run_single_vcpu()
4175 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; in kvmhv_run_single_vcpu()
4183 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_run_single_vcpu()
4191 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4197 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_run_single_vcpu()
4200 mtspr(SPRN_LPID, kvm->arch.host_lpid); in kvmhv_run_single_vcpu()
4211 cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); in kvmhv_run_single_vcpu()
4221 ((get_tb() < vcpu->arch.dec_expires) || in kvmhv_run_single_vcpu()
4234 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4236 if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && in kvmhv_run_single_vcpu()
4239 while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { in kvmhv_run_single_vcpu()
4241 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4242 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
4243 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4246 spin_lock(&vc->lock); in kvmhv_run_single_vcpu()
4248 spin_unlock(&vc->lock); in kvmhv_run_single_vcpu()
4251 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4253 vc->vcore_state = VCORE_INACTIVE; in kvmhv_run_single_vcpu()
4260 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4263 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4264 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
4265 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4274 struct kvm_run *run = vcpu->run; in kvmppc_vcpu_run_hv()
4282 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4283 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_vcpu_run_hv()
4284 return -EINVAL; in kvmppc_vcpu_run_hv()
4290 * If the guest has TM enabled, save away their TM-related SPRs in kvmppc_vcpu_run_hv()
4294 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && in kvmppc_vcpu_run_hv()
4295 (current->thread.regs->msr & MSR_TM)) { in kvmppc_vcpu_run_hv()
4296 if (MSR_TM_ACTIVE(current->thread.regs->msr)) { in kvmppc_vcpu_run_hv()
4297 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_vcpu_run_hv()
4298 run->fail_entry.hardware_entry_failure_reason = 0; in kvmppc_vcpu_run_hv()
4299 return -EINVAL; in kvmppc_vcpu_run_hv()
4303 current->thread.tm_tfhar = mfspr(SPRN_TFHAR); in kvmppc_vcpu_run_hv()
4304 current->thread.tm_tfiar = mfspr(SPRN_TFIAR); in kvmppc_vcpu_run_hv()
4305 current->thread.tm_texasr = mfspr(SPRN_TEXASR); in kvmppc_vcpu_run_hv()
4306 current->thread.regs->msr &= ~MSR_TM; in kvmppc_vcpu_run_hv()
4314 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4315 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4316 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4321 /* No need to go into the guest when all we'll do is come back out */ in kvmppc_vcpu_run_hv()
4323 run->exit_reason = KVM_EXIT_INTR; in kvmppc_vcpu_run_hv()
4324 return -EINTR; in kvmppc_vcpu_run_hv()
4327 kvm = vcpu->kvm; in kvmppc_vcpu_run_hv()
4328 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4343 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4344 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4345 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4356 if (kvm->arch.threads_indep && kvm_is_radix(kvm) && in kvmppc_vcpu_run_hv()
4359 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4363 if (run->exit_reason == KVM_EXIT_PAPR_HCALL && in kvmppc_vcpu_run_hv()
4364 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
4370 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_vcpu_run_hv()
4372 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
4373 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_vcpu_run_hv()
4388 mtspr(SPRN_FSCR, current->thread.fscr); in kvmppc_vcpu_run_hv()
4392 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
4393 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4400 (*sps)->page_shift = shift; in kvmppc_add_seg_page_size()
4401 (*sps)->slb_enc = sllp; in kvmppc_add_seg_page_size()
4402 (*sps)->enc[0].page_shift = shift; in kvmppc_add_seg_page_size()
4403 (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); in kvmppc_add_seg_page_size()
4409 if (penc != -1) { in kvmppc_add_seg_page_size()
4410 (*sps)->enc[1].page_shift = 24; in kvmppc_add_seg_page_size()
4411 (*sps)->enc[1].pte_enc = penc; in kvmppc_add_seg_page_size()
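
kvmppc_add_seg_page_size() above fills one entry of the page-size array returned to userspace: enc[0] always advertises the segment's base page size, and when the MMU provides a valid LP encoding for 16 MB pages inside that segment (penc != -1) a second enc[1] entry advertises the 16 MB mixed-page-size option. A sketch with a hypothetical cut-down version of the structure and made-up encodings:

#include <stdio.h>

/* Sketch only: a cut-down stand-in for the uapi segment-page-size entry. */
struct toy_enc { int page_shift; int pte_enc; };
struct toy_seg_page_size {
	int page_shift;          /* base page size for this segment type    */
	int slb_enc;             /* SLB encoding (the sllp argument)        */
	struct toy_enc enc[2];   /* actual page sizes usable in the segment */
};

/* penc_* stand in for the LP encoding lookup; -1 means "not supported". */
static void add_seg_page_size(struct toy_seg_page_size *sps, int shift,
			      int sllp, int penc_base, int penc_16m)
{
	sps->page_shift = shift;
	sps->slb_enc = sllp;
	sps->enc[0].page_shift = shift;      /* base size is always advertised */
	sps->enc[0].pte_enc = penc_base;
	if (shift != 24 && penc_16m != -1) { /* optional 16 MB mixed-size entry */
		sps->enc[1].page_shift = 24;
		sps->enc[1].pte_enc = penc_16m;
	}
}

int main(void)
{
	struct toy_seg_page_size sps = { 0 };

	add_seg_page_size(&sps, 16, 0x110, 1, 0);   /* 64K base + 16M (made-up encodings) */
	printf("base shift %d, extra shift %d\n",
	       sps.enc[0].page_shift, sps.enc[1].page_shift);
	return 0;
}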
4427 info->data_keys = 32; in kvm_vm_ioctl_get_smmu_info_hv()
4428 info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; in kvm_vm_ioctl_get_smmu_info_hv()
4430 /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ in kvm_vm_ioctl_get_smmu_info_hv()
4431 info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; in kvm_vm_ioctl_get_smmu_info_hv()
4432 info->slb_size = 32; in kvm_vm_ioctl_get_smmu_info_hv()
4434 /* We only support these sizes for now, and no multi-size segments */ in kvm_vm_ioctl_get_smmu_info_hv()
4435 sps = &info->sps[0]; in kvm_vm_ioctl_get_smmu_info_hv()
4442 info->flags |= KVM_PPC_NO_HASH; in kvm_vm_ioctl_get_smmu_info_hv()
4460 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4462 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log_hv()
4463 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log_hv()
4467 memslot = id_to_memslot(slots, log->slot); in kvm_vm_ioctl_get_dirty_log_hv()
4468 r = -ENOENT; in kvm_vm_ioctl_get_dirty_log_hv()
4469 if (!memslot || !memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log_hv()
4477 buf = memslot->dirty_bitmap + n / sizeof(long); in kvm_vm_ioctl_get_dirty_log_hv()
4493 p = memslot->dirty_bitmap; in kvm_vm_ioctl_get_dirty_log_hv()
4500 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4501 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4502 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4503 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4506 r = -EFAULT; in kvm_vm_ioctl_get_dirty_log_hv()
4507 if (copy_to_user(log->dirty_bitmap, buf, n)) in kvm_vm_ioctl_get_dirty_log_hv()
4512 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4518 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
4519 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
4527 unsigned long npages = mem->memory_size >> PAGE_SHIFT; in kvmppc_core_prepare_memory_region_hv()
4530 slot->arch.rmap = vzalloc(array_size(npages, in kvmppc_core_prepare_memory_region_hv()
4531 sizeof(*slot->arch.rmap))); in kvmppc_core_prepare_memory_region_hv()
4532 if (!slot->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
4533 return -ENOMEM; in kvmppc_core_prepare_memory_region_hv()
4545 unsigned long npages = mem->memory_size >> PAGE_SHIFT; in kvmppc_core_commit_memory_region_hv()
4550 * MMIO be no longer emulated MMIO, so invalidate in kvmppc_core_commit_memory_region_hv()
4554 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
4559 * flush shadow mappings. For KVM_MR_CREATE we have no in kvmppc_core_commit_memory_region_hv()
4564 * to get rid of any THP PTEs in the partition-scoped page tables in kvmppc_core_commit_memory_region_hv()
4570 ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) in kvmppc_core_commit_memory_region_hv()
4575 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
4596 * Update LPCR values in kvm->arch and in vcores.
4597 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4598 * of kvm->arch.lpcr update).
4605 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
4608 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
4611 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
4614 spin_lock(&vc->lock); in kvmppc_update_lpcr()
4615 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
4616 spin_unlock(&vc->lock); in kvmppc_update_lpcr()
4617 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
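
kvmppc_update_lpcr() is a read-modify-write under a mask: only the bits selected by mask are replaced, first in the VM-wide kvm->arch.lpcr and then in each vcore's copy under its lock (callers pass an lpcr value whose set bits all lie within mask). The core arithmetic as a tiny sketch:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: replace just the masked bits of an LPCR-like register image.
 * new_bits is assumed to contain only bits covered by mask, as in the caller. */
static uint64_t masked_update(uint64_t old, uint64_t new_bits, uint64_t mask)
{
	return (old & ~mask) | new_bits;
}

int main(void)
{
	uint64_t lpcr = 0xf0f0, mask = 0x00ff;

	/* Only the low byte is replaced; the high byte is preserved. */
	printf("%#llx\n", (unsigned long long)masked_update(lpcr, 0x0012, mask)); /* 0xf012 */
	return 0;
}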
4627 /* PS field - page size for VRMA */ in kvmppc_setup_partition_table()
4628 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
4629 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
4631 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
4634 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
4637 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
4638 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
4640 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
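
kvmppc_setup_partition_table() composes the guest's two partition-table doublewords: for an HPT guest dw0 carries the VRMA page-size bits plus the SDR1-style HPT base/size and dw1 the process table as set by userspace; for a radix guest dw0 carries the physical address of the partition-scoped PGD plus its index size and dw1 the process table with PATB_GR set. A sketch of the radix composition with hypothetical bit definitions (not the architected layout):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: hypothetical bit positions, not the architected values. */
#define TOY_PATB_GR        (1ULL << 63)  /* "guest uses radix" flag in dw1 */
#define TOY_PGD_INDEX_SIZE 0xdULL        /* low bits of dw0: root table size */

struct toy_patb_entry { uint64_t dw0, dw1; };

/* Radix guest: dw0 points at the partition-scoped page-table root,
 * dw1 points at the process table supplied by the guest/userspace. */
static struct toy_patb_entry make_radix_entry(uint64_t pgd_pa, uint64_t proc_table)
{
	struct toy_patb_entry e = {
		.dw0 = pgd_pa | TOY_PGD_INDEX_SIZE,
		.dw1 = TOY_PATB_GR | proc_table,
	};
	return e;
}

int main(void)
{
	struct toy_patb_entry e = make_radix_entry(0x1000000ULL, 0x2000000ULL);

	printf("dw0=%#llx dw1=%#llx\n",
	       (unsigned long long)e.dw0, (unsigned long long)e.dw1);
	return 0;
}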
4644 * Set up HPT (hashed page table) and RMA (real-mode area).
4645 * Must be called with kvm->arch.mmu_setup_lock held.
4650 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
4659 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
4667 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) in kvmppc_hv_setup_htab_rma()
4679 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_hv_setup_htab_rma()
4683 err = -EINVAL; in kvmppc_hv_setup_htab_rma()
4684 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_hv_setup_htab_rma()
4688 hva = memslot->userspace_addr; in kvmppc_hv_setup_htab_rma()
4689 mmap_read_lock(kvm->mm); in kvmppc_hv_setup_htab_rma()
4690 vma = find_vma(kvm->mm, hva); in kvmppc_hv_setup_htab_rma()
4691 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) in kvmppc_hv_setup_htab_rma()
4696 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
4708 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
4715 /* the -4 is to account for senc values starting at 0x10 */ in kvmppc_hv_setup_htab_rma()
4716 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
4720 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
4724 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_hv_setup_htab_rma()
4729 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
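
When no HPT exists yet, the allocation above starts at the default order and, on -ENOMEM, retries with the order reduced by one until it succeeds or drops below PPC_MIN_HPT_ORDER. A standalone sketch of that back-off loop with a hypothetical allocator:

#include <stdio.h>
#include <errno.h>

#define TOY_MIN_ORDER 18   /* stand-in for PPC_MIN_HPT_ORDER */

/* Hypothetical allocator: pretend only orders <= 20 can be satisfied. */
static int toy_alloc_hpt(int order)
{
	return (order <= 20) ? 0 : -ENOMEM;
}

/* Try the requested order first, then back off one order at a time. */
static int alloc_hpt_with_backoff(int order, int *order_used)
{
	int err = toy_alloc_hpt(order);

	while (err == -ENOMEM && --order >= TOY_MIN_ORDER)
		err = toy_alloc_hpt(order);
	*order_used = order;
	return err;
}

int main(void)
{
	int order, err = alloc_hpt_with_backoff(24, &order);

	printf("err=%d order=%d\n", err, order);   /* err=0 order=20 */
	return 0;
}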
4734 * Must be called with kvm->arch.mmu_setup_lock held and
4735 * mmu_ready = 0 and no vcpus running.
4742 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
4744 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
4745 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
4746 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
4754 * Must be called with kvm->arch.mmu_setup_lock held and
4755 * mmu_ready = 0 and no vcpus running.
4766 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
4767 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
4768 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
4769 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
4777 * Allocate a per-core structure for managing state about which cores are
4782 * It is only freed when the kvm-hv module is unloaded.
4802 ops->rm_core = kzalloc(size, GFP_KERNEL); in kvmppc_alloc_host_rm_ops()
4804 if (!ops->rm_core) { in kvmppc_alloc_host_rm_ops()
4816 ops->rm_core[core].rm_state.in_host = 1; in kvmppc_alloc_host_rm_ops()
4819 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; in kvmppc_alloc_host_rm_ops()
4824 * Do an atomic assignment (no locks used here), but if someone in kvmppc_alloc_host_rm_ops()
4832 kfree(ops->rm_core); in kvmppc_alloc_host_rm_ops()
4848 kfree(kvmppc_host_rm_ops_hv->rm_core); in kvmppc_free_host_rm_ops()
4861 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
4862 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
4863 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
4869 return -ENOMEM; in kvmppc_core_init_vm_hv()
4870 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
4884 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
4887 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
4888 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
4891 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
4895 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
4896 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
4903 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
4912 * be unnecessary but better safe than sorry in case we re-enable in kvmppc_core_init_vm_hv()
4931 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
4932 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
4937 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
4943 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
4946 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
4953 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
4955 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
4957 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
4959 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
4970 kvm->arch.threads_indep = true; in kvmppc_core_init_vm_hv()
4972 kvm->arch.threads_indep = indep_threads_mode; in kvmppc_core_init_vm_hv()
4975 if (!kvm->arch.threads_indep) in kvmppc_core_init_vm_hv()
4986 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
4988 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
4989 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
4994 snprintf(buf, sizeof(buf), "vm%d", current->pid); in kvmppc_core_init_vm_hv()
4995 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
5008 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5009 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5014 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()
5016 if (!kvm->arch.threads_indep) in kvmppc_core_destroy_vm_hv()
5025 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5031 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5032 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5033 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5034 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5037 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5071 return -EIO; in kvmppc_core_check_processor_compat_hv()
5078 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5099 return -EIO; in kvmppc_set_passthru_irq()
5101 mutex_lock(&kvm->lock); in kvmppc_set_passthru_irq()
5103 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5108 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5109 return -ENOMEM; in kvmppc_set_passthru_irq()
5111 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5115 * For now, we only support interrupts for which the EOI operation in kvmppc_set_passthru_irq()
5117 * what our real-mode EOI code does, or a XIVE interrupt in kvmppc_set_passthru_irq()
5119 chip = irq_data_get_irq_chip(&desc->irq_data); in kvmppc_set_passthru_irq()
5123 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5124 return -ENOENT; in kvmppc_set_passthru_irq()
5130 * otherwise re-use this entry. in kvmppc_set_passthru_irq()
5132 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_set_passthru_irq()
5133 if (guest_gsi == pimap->mapped[i].v_hwirq) { in kvmppc_set_passthru_irq()
5134 if (pimap->mapped[i].r_hwirq) { in kvmppc_set_passthru_irq()
5135 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5136 return -EINVAL; in kvmppc_set_passthru_irq()
5143 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5144 return -EAGAIN; /* table is full */ in kvmppc_set_passthru_irq()
5147 irq_map = &pimap->mapped[i]; in kvmppc_set_passthru_irq()
5149 irq_map->v_hwirq = guest_gsi; in kvmppc_set_passthru_irq()
5150 irq_map->desc = desc; in kvmppc_set_passthru_irq()
5157 irq_map->r_hwirq = desc->irq_data.hwirq; in kvmppc_set_passthru_irq()
5159 if (i == pimap->n_mapped) in kvmppc_set_passthru_irq()
5160 pimap->n_mapped++; in kvmppc_set_passthru_irq()
5165 kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); in kvmppc_set_passthru_irq()
5167 irq_map->r_hwirq = 0; in kvmppc_set_passthru_irq()
5169 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
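
kvmppc_set_passthru_irq() above first scans the existing pimap for an entry whose v_hwirq matches the guest GSI: a live entry (r_hwirq still set) fails with -EINVAL, a stale one is reused, and if nothing matches a new slot is appended unless the fixed-size table is full (-EAGAIN). A self-contained sketch of that find-or-reuse walk over a hypothetical table:

#include <stdio.h>
#include <errno.h>

#define TOY_MAX_MAPPED 4   /* stand-in for the fixed pimap capacity */

struct toy_irq_map { unsigned int v_hwirq; unsigned int r_hwirq; };
struct toy_pimap   { int n_mapped; struct toy_irq_map mapped[TOY_MAX_MAPPED]; };

static int map_passthru_irq(struct toy_pimap *p, unsigned int guest_gsi,
			    unsigned int host_hwirq)
{
	int i;

	for (i = 0; i < p->n_mapped; i++) {
		if (p->mapped[i].v_hwirq == guest_gsi) {
			if (p->mapped[i].r_hwirq)
				return -EINVAL;   /* already mapped and live */
			break;                    /* stale entry: reuse slot */
		}
	}
	if (i == TOY_MAX_MAPPED)
		return -EAGAIN;                   /* table is full */

	p->mapped[i].v_hwirq = guest_gsi;
	p->mapped[i].r_hwirq = host_hwirq;
	if (i == p->n_mapped)
		p->n_mapped++;                    /* appended a brand-new entry */
	return 0;
}

int main(void)
{
	struct toy_pimap p = { 0 };

	printf("%d\n", map_passthru_irq(&p, 42, 7));   /* 0: new mapping       */
	printf("%d\n", map_passthru_irq(&p, 42, 9));   /* -EINVAL: still live  */
	p.mapped[0].r_hwirq = 0;                       /* unmap, as clr would  */
	printf("%d\n", map_passthru_irq(&p, 42, 9));   /* 0: slot reused       */
	return 0;
}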
5185 return -EIO; in kvmppc_clr_passthru_irq()
5187 mutex_lock(&kvm->lock); in kvmppc_clr_passthru_irq()
5188 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
5191 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
5193 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_clr_passthru_irq()
5194 if (guest_gsi == pimap->mapped[i].v_hwirq) in kvmppc_clr_passthru_irq()
5198 if (i == pimap->n_mapped) { in kvmppc_clr_passthru_irq()
5199 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
5200 return -ENODEV; in kvmppc_clr_passthru_irq()
5204 rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc); in kvmppc_clr_passthru_irq()
5206 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); in kvmppc_clr_passthru_irq()
5209 pimap->mapped[i].r_hwirq = 0; in kvmppc_clr_passthru_irq()
5216 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
5227 irqfd->producer = prod; in kvmppc_irq_bypass_add_producer_hv()
5229 ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_add_producer_hv()
5232 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_add_producer_hv()
5244 irqfd->producer = NULL; in kvmppc_irq_bypass_del_producer_hv()
5248 * default external interrupt handling mode - KVM real mode in kvmppc_irq_bypass_del_producer_hv()
5251 ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_del_producer_hv()
5254 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_del_producer_hv()
5261 struct kvm *kvm __maybe_unused = filp->private_data; in kvm_arch_vm_ioctl_hv()
5272 r = -EOPNOTSUPP; in kvm_arch_vm_ioctl_hv()
5276 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5289 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5299 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5310 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5319 r = -ENOTTY; in kvm_arch_vm_ioctl_hv()
5328 * all hcalls that were implemented before the hcall-enabling
5376 return -ENODEV; in kvmhv_configure_mmu()
5379 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) in kvmhv_configure_mmu()
5380 return -EINVAL; in kvmhv_configure_mmu()
5383 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); in kvmhv_configure_mmu()
5384 if (!!(cfg->process_table & PATB_GR) != radix) in kvmhv_configure_mmu()
5385 return -EINVAL; in kvmhv_configure_mmu()
5388 if ((cfg->process_table & PRTS_MASK) > 24) in kvmhv_configure_mmu()
5389 return -EINVAL; in kvmhv_configure_mmu()
5393 return -EINVAL; in kvmhv_configure_mmu()
5397 return -EINVAL; in kvmhv_configure_mmu()
5399 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5401 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
5402 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
5405 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
5406 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
5407 err = -EBUSY; in kvmhv_configure_mmu()
5419 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
5422 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
5427 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5434 return -EPERM; in kvmhv_enable_nested()
5436 return -ENODEV; in kvmhv_enable_nested()
5440 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
5447 int rc = -EINVAL; in kvmhv_load_from_eaddr()
5453 rc = -EINVAL; in kvmhv_load_from_eaddr()
5457 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
5458 rc = -EAGAIN; in kvmhv_load_from_eaddr()
5466 int rc = -EINVAL; in kvmhv_store_to_eaddr()
5472 rc = -EINVAL; in kvmhv_store_to_eaddr()
5476 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
5477 rc = -EAGAIN; in kvmhv_store_to_eaddr()
5485 vpa->gpa = 0; in unpin_vpa_reset()
5486 vpa->pinned_addr = NULL; in unpin_vpa_reset()
5487 vpa->dirty = false; in unpin_vpa_reset()
5488 vpa->update_pending = 0; in unpin_vpa_reset()
5500 return -EINVAL; in kvmhv_enable_svm()
5502 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
5509 * - Release all device pages
5510 * - Issue ucall to terminate the guest on the UV side
5511 * - Unpin the VPA pages.
5512 * - Reinit the partition scoped page tables
5522 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
5525 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
5526 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
5527 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
5528 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
5531 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
5532 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
5533 ret = -EBUSY; in kvmhv_svm_off()
5538 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_svm_off()
5548 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
5551 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_svm_off()
5553 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
5555 ret = -EINVAL; in kvmhv_svm_off()
5561 * to UV via UV_PAGE_IN before the non-boot vcpus get a in kvmhv_svm_off()
5570 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5571 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
5572 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
5573 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
5574 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5578 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
5579 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
5581 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
5641 if (paca_ptrs[first_cpu]->sibling_subcore_state) in kvm_init_subcore_bitmap()
5648 return -ENOMEM; in kvm_init_subcore_bitmap()
5654 paca_ptrs[cpu]->sibling_subcore_state = in kvm_init_subcore_bitmap()
5671 pr_err("KVM-HV: Host does not support TLBIE\n"); in kvmppc_book3s_init_hv()
5672 return -ENODEV; in kvmppc_book3s_init_hv()
5680 return -ENODEV; in kvmppc_book3s_init_hv()
5692 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or in kvmppc_book3s_init_hv()
5697 !local_paca->kvm_hstate.xics_phys) { in kvmppc_book3s_init_hv()
5700 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); in kvmppc_book3s_init_hv()
5702 pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); in kvmppc_book3s_init_hv()
5703 return -ENODEV; in kvmppc_book3s_init_hv()
5705 /* presence of intc confirmed - node can be dropped again */ in kvmppc_book3s_init_hv()
5738 pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); in kvmppc_book3s_init_hv()