Lines Matching full:vcpu
135 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
167 struct kvm_vcpu *vcpu; in next_runnable_thread() local
170 vcpu = READ_ONCE(vc->runnable_threads[i]); in next_runnable_thread()
171 if (vcpu) { in next_runnable_thread()
173 return vcpu; in next_runnable_thread()
180 #define for_each_runnable_thread(i, vcpu, vc) \ argument
181 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
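For orientation, the macro above is how the rest of this file walks a vcore's runnable threads; a minimal illustrative sketch (vc is assumed to be a struct kvmppc_vcore *, the loop body mirrors post_guest_process() further down):

	int i;
	struct kvm_vcpu *vcpu;

	for_each_runnable_thread(i, vcpu, vc) {
		/* vcpu iterates over each non-NULL entry of vc->runnable_threads[],
		 * located by next_runnable_thread() starting from index -1 */
		wake_up(&vcpu->arch.cpu_run);
	}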
227 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) in kvmppc_fast_vcpu_kick_hv() argument
235 * is the barrier in vcpu run that orders setting the cpu fields vs in kvmppc_fast_vcpu_kick_hv()
239 waitp = kvm_arch_vcpu_get_wait(vcpu); in kvmppc_fast_vcpu_kick_hv()
241 ++vcpu->stat.generic.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
243 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
248 cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
256 * Stolen time is counted as time when either the vcpu is able to
258 * is preempted or sleeping, or when the vcpu needs something done
259 * in the kernel by the task running the vcpu, but that task is
261 * separately, since one of the vcpu tasks will take on the job
262 * of running the core, and the other vcpu tasks in the vcore will
266 * Hence we accumulate stolen time when the vcpu can run as part of
267 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
270 * stolen time for a vcore when it is inactive, or for a vcpu
272 * a misnomer; it means that the vcpu task is not executing in
275 * between time that the vcpu is genuinely stopped, time that
276 * the task is actively working on behalf of the vcpu, and time
286 * The POWER9 path is simpler, one vcpu per virtual core so the
287 * former case does not exist. If a vcpu is preempted when it is
318 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_hv() argument
320 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
325 if (vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
326 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); in kvmppc_core_vcpu_load_hv()
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
328 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
338 * vcpu, and once it is set to this vcpu, only this task in kvmppc_core_vcpu_load_hv()
341 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
344 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
345 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
346 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
347 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
348 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
350 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
353 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_hv() argument
355 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
364 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); in kvmppc_core_vcpu_put_hv()
366 * Account stolen time when preempted while the vcpu task is in kvmppc_core_vcpu_put_hv()
370 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
371 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
377 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
380 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
381 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
382 vcpu->arch.busy_preempt = now; in kvmppc_core_vcpu_put_hv()
383 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
386 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_hv() argument
388 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
412 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) in kvmppc_set_arch_compat() argument
415 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
471 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR); in kvmppc_set_arch_compat()
482 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) in kvmppc_dump_regs() argument
486 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
488 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
491 r, kvmppc_get_gpr(vcpu, r), in kvmppc_dump_regs()
492 r+16, kvmppc_get_gpr(vcpu, r+16)); in kvmppc_dump_regs()
494 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
496 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
498 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
500 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
502 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
503 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
505 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
506 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
507 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
509 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
511 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
512 vcpu->arch.last_inst); in kvmppc_dump_regs()
520 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) in init_vpa() argument
526 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, in set_vpa() argument
532 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
538 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
558 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, in do_h_register_vpa() argument
562 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
675 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap, in kvmppc_update_vpa() argument
678 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
693 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
698 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
725 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) in kvmppc_update_vpas() argument
727 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpas()
730 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
731 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
732 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
735 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
736 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
737 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); in kvmppc_update_vpas()
740 kvmhv_nestedv2_set_vpa(vcpu, ~0ull); in kvmppc_update_vpas()
744 if (vcpu->arch.vpa.pinned_addr) { in kvmppc_update_vpas()
745 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
747 kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr)); in kvmppc_update_vpas()
750 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
751 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); in kvmppc_update_vpas()
755 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
756 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
758 if (vcpu->arch.slb_shadow.update_pending) { in kvmppc_update_vpas()
759 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); in kvmppc_update_vpas()
765 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
788 static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, in __kvmppc_create_dtl_entry() argument
795 dt = vcpu->arch.dtl_ptr; in __kvmppc_create_dtl_entry()
802 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); in __kvmppc_create_dtl_entry()
808 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in __kvmppc_create_dtl_entry()
809 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in __kvmppc_create_dtl_entry()
812 if (dt == vcpu->arch.dtl.pinned_end) in __kvmppc_create_dtl_entry()
813 dt = vcpu->arch.dtl.pinned_addr; in __kvmppc_create_dtl_entry()
814 vcpu->arch.dtl_ptr = dt; in __kvmppc_create_dtl_entry()
817 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in __kvmppc_create_dtl_entry()
819 /* vcpu->arch.dtl.dirty is set by the caller */ in __kvmppc_create_dtl_entry()
822 static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu, in kvmppc_update_vpa_dispatch() argument
831 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch()
838 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch()
839 vcpu->arch.stolen_logged = core_stolen; in kvmppc_update_vpa_dispatch()
840 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
841 stolen += vcpu->arch.busy_stolen; in kvmppc_update_vpa_dispatch()
842 vcpu->arch.busy_stolen = 0; in kvmppc_update_vpa_dispatch()
843 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
847 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen); in kvmppc_update_vpa_dispatch()
849 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch()
852 static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu, in kvmppc_update_vpa_dispatch_p9() argument
860 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch_p9()
865 stolen_delta = stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch_p9()
866 vcpu->arch.stolen_logged = stolen; in kvmppc_update_vpa_dispatch_p9()
870 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); in kvmppc_update_vpa_dispatch_p9()
872 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch_p9()
875 /* See if there is a doorbell interrupt pending for a vcpu */
876 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) in kvmppc_doorbell_pending() argument
881 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
887 * of vcpu->doorbell_request. This barrier matches the in kvmppc_doorbell_pending()
891 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
892 thr = vcpu->vcpu_id - vc->first_vcpuid; in kvmppc_doorbell_pending()
896 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) in kvmppc_power8_compatible() argument
898 if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207) in kvmppc_power8_compatible()
900 if ((!kvmppc_get_arch_compat(vcpu)) && in kvmppc_power8_compatible()
906 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, in kvmppc_h_set_mode() argument
912 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
921 kvmppc_set_ciabr_hv(vcpu, value1); in kvmppc_h_set_mode()
924 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
932 kvmppc_set_dawr0_hv(vcpu, value1); in kvmppc_h_set_mode()
933 kvmppc_set_dawrx0_hv(vcpu, value2); in kvmppc_h_set_mode()
936 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
942 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
948 kvmppc_set_dawr1_hv(vcpu, value1); in kvmppc_h_set_mode()
949 kvmppc_set_dawrx1_hv(vcpu, value2); in kvmppc_h_set_mode()
957 kvmhv_vcpu_is_radix(vcpu) && mflags == 3) in kvmppc_h_set_mode()
1007 static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_page_init() argument
1025 ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); in kvmppc_h_page_init()
1029 ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); in kvmppc_h_page_init()
1050 * In the case of the P9 single vcpu per vcore case, the real in kvm_arch_vcpu_yield_to()
1066 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) in kvmppc_get_yield_count() argument
1071 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1072 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
1075 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1084 static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu) in kvmppc_nested_h_rpt_invalidate() argument
1086 unsigned long type = kvmppc_get_gpr(vcpu, 6); in kvmppc_nested_h_rpt_invalidate()
1095 pid = kvmppc_get_gpr(vcpu, 4); in kvmppc_nested_h_rpt_invalidate()
1096 pg_sizes = kvmppc_get_gpr(vcpu, 7); in kvmppc_nested_h_rpt_invalidate()
1097 start = kvmppc_get_gpr(vcpu, 8); in kvmppc_nested_h_rpt_invalidate()
1098 end = kvmppc_get_gpr(vcpu, 9); in kvmppc_nested_h_rpt_invalidate()
1100 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
1103 kvmppc_set_gpr(vcpu, 3, H_SUCCESS); in kvmppc_nested_h_rpt_invalidate()
1107 static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu, in kvmppc_h_rpt_invalidate() argument
1112 if (!kvm_is_radix(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1122 if (!nesting_enabled(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1129 return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes, in kvmppc_h_rpt_invalidate()
1136 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
1141 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) in kvmppc_pseries_do_hcall() argument
1143 struct kvm *kvm = vcpu->kvm; in kvmppc_pseries_do_hcall()
1144 unsigned long req = kvmppc_get_gpr(vcpu, 3); in kvmppc_pseries_do_hcall()
1151 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1156 ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1157 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1158 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1163 ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1164 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1165 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
1166 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
1171 ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1172 kvmppc_get_gpr(vcpu, 5)); in kvmppc_pseries_do_hcall()
1177 ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1178 kvmppc_get_gpr(vcpu, 5)); in kvmppc_pseries_do_hcall()
1183 ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1184 kvmppc_get_gpr(vcpu, 5)); in kvmppc_pseries_do_hcall()
1189 ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1190 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1191 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1196 ret = kvmppc_h_bulk_remove(vcpu); in kvmppc_pseries_do_hcall()
1204 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
1216 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
1224 yield_count = kvmppc_get_gpr(vcpu, 5); in kvmppc_pseries_do_hcall()
1230 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1231 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1232 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1239 rc = kvmppc_rtas_hcall(vcpu); in kvmppc_pseries_do_hcall()
1250 ret = kvmppc_h_logical_ci_load(vcpu); in kvmppc_pseries_do_hcall()
1255 ret = kvmppc_h_logical_ci_store(vcpu); in kvmppc_pseries_do_hcall()
1260 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1261 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1262 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
1263 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
1273 if (kvmppc_xics_enabled(vcpu)) { in kvmppc_pseries_do_hcall()
1278 ret = kvmppc_xics_hcall(vcpu, req); in kvmppc_pseries_do_hcall()
1283 ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); in kvmppc_pseries_do_hcall()
1286 ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1287 kvmppc_get_gpr(vcpu, 5)); in kvmppc_pseries_do_hcall()
1291 ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1292 kvmppc_get_gpr(vcpu, 5)); in kvmppc_pseries_do_hcall()
1297 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1298 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1299 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1304 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1305 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1306 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
1307 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
1312 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1313 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1314 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
1315 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
1325 kvmppc_set_gpr(vcpu, 4, rand); in kvmppc_pseries_do_hcall()
1329 ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1330 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1331 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
1332 kvmppc_get_gpr(vcpu, 7), in kvmppc_pseries_do_hcall()
1333 kvmppc_get_gpr(vcpu, 8), in kvmppc_pseries_do_hcall()
1334 kvmppc_get_gpr(vcpu, 9)); in kvmppc_pseries_do_hcall()
1340 ret = kvmhv_set_partition_table(vcpu); in kvmppc_pseries_do_hcall()
1346 ret = kvmhv_enter_nested_guest(vcpu); in kvmppc_pseries_do_hcall()
1348 kvmppc_set_gpr(vcpu, 3, 0); in kvmppc_pseries_do_hcall()
1349 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1352 kvmppc_set_gpr(vcpu, 3, 0); in kvmppc_pseries_do_hcall()
1353 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1360 ret = kvmhv_do_nested_tlbie(vcpu); in kvmppc_pseries_do_hcall()
1365 ret = kvmhv_copy_tofrom_guest_nested(vcpu); in kvmppc_pseries_do_hcall()
1368 ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1369 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1370 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1374 if (kvmppc_get_srr1(vcpu) & MSR_S) in kvmppc_pseries_do_hcall()
1376 kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1377 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1378 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1382 if (kvmppc_get_srr1(vcpu) & MSR_S) in kvmppc_pseries_do_hcall()
1384 kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
1385 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
1386 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
1390 if (kvmppc_get_srr1(vcpu) & MSR_S) in kvmppc_pseries_do_hcall()
1395 if (kvmppc_get_srr1(vcpu) & MSR_S) in kvmppc_pseries_do_hcall()
1413 kvmppc_set_gpr(vcpu, 3, ret); in kvmppc_pseries_do_hcall()
1414 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
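For orientation, the dispatcher above follows the PAPR hypercall convention: the hcall number arrives in GPR3 and its arguments in GPR4 onward, which is why each case reads kvmppc_get_gpr(vcpu, 4..9), and the return status is written back with kvmppc_set_gpr(vcpu, 3, ret). A hedged guest-side sketch, assuming a pseries guest and an illustrative target_cpu variable:

	/* The guest-side wrapper places H_PROD in r3 and target_cpu in r4
	 * before executing "sc 1"; the host's return value comes back in r3. */
	long rc = plpar_hcall_norets(H_PROD, target_cpu);
	if (rc != H_SUCCESS)
		pr_warn("H_PROD failed: %ld\n", rc);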
1425 static void kvmppc_cede(struct kvm_vcpu *vcpu) in kvmppc_cede() argument
1427 __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE); in kvmppc_cede()
1428 vcpu->arch.ceded = 1; in kvmppc_cede()
1430 if (vcpu->arch.prodded) { in kvmppc_cede()
1431 vcpu->arch.prodded = 0; in kvmppc_cede()
1433 vcpu->arch.ceded = 0; in kvmppc_cede()
1470 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) in kvmppc_emulate_debug_inst() argument
1474 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != in kvmppc_emulate_debug_inst()
1484 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in kvmppc_emulate_debug_inst()
1485 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1488 kvmppc_core_queue_program(vcpu, SRR1_PROGILL | in kvmppc_emulate_debug_inst()
1489 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); in kvmppc_emulate_debug_inst()
1498 static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu) in kvmppc_read_dpdes() argument
1504 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1506 cpu = vcpu->vcpu_id & ~(nthreads - 1); in kvmppc_read_dpdes()
1508 v = kvmppc_find_vcpu(vcpu->kvm, cpu); in kvmppc_read_dpdes()
1512 * If the vcpu is currently running on a physical cpu thread, in kvmppc_read_dpdes()
1531 static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) in kvmppc_emulate_doorbell_instr() argument
1535 struct kvm *kvm = vcpu->kvm; in kvmppc_emulate_doorbell_instr()
1539 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE) in kvmppc_emulate_doorbell_instr()
1545 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1548 arg = kvmppc_get_gpr(vcpu, rb); in kvmppc_emulate_doorbell_instr()
1554 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); in kvmppc_emulate_doorbell_instr()
1563 arg = kvmppc_get_gpr(vcpu, rb); in kvmppc_emulate_doorbell_instr()
1566 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1567 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1575 arg = kvmppc_read_dpdes(vcpu); in kvmppc_emulate_doorbell_instr()
1580 kvmppc_set_gpr(vcpu, get_rt(inst), arg); in kvmppc_emulate_doorbell_instr()
1585 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); in kvmppc_emulate_doorbell_instr()
1596 static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu) in kvmppc_pmu_unavailable() argument
1598 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) in kvmppc_pmu_unavailable()
1601 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM); in kvmppc_pmu_unavailable()
1606 static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu) in kvmppc_ebb_unavailable() argument
1608 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) in kvmppc_ebb_unavailable()
1611 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB); in kvmppc_ebb_unavailable()
1616 static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu) in kvmppc_tm_unavailable() argument
1618 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) in kvmppc_tm_unavailable()
1621 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM); in kvmppc_tm_unavailable()
1626 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, in kvmppc_handle_exit_hv() argument
1629 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_hv()
1632 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
1642 if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) { in kvmppc_handle_exit_hv()
1645 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1646 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1647 kvmppc_dump_regs(vcpu); in kvmppc_handle_exit_hv()
1649 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1654 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1658 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_exit_hv()
1661 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
1667 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
1684 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()

1692 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1693 ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) | in kvmppc_handle_exit_hv()
1694 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); in kvmppc_handle_exit_hv()
1695 kvmppc_core_queue_machine_check(vcpu, flags); in kvmppc_handle_exit_hv()
1702 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1706 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1723 flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) | in kvmppc_handle_exit_hv()
1724 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); in kvmppc_handle_exit_hv()
1725 kvmppc_core_queue_program(vcpu, flags); in kvmppc_handle_exit_hv()
1733 if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { in kvmppc_handle_exit_hv()
1739 if (!kvmhv_vcpu_is_radix(vcpu)) { in kvmppc_handle_exit_hv()
1745 kvmppc_core_queue_syscall(vcpu); in kvmppc_handle_exit_hv()
1753 kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); in kvmppc_handle_exit_hv()
1764 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
1766 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
1768 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1787 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { in kvmppc_handle_exit_hv()
1792 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1803 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1804 kvmppc_core_queue_data_storage(vcpu, in kvmppc_handle_exit_hv()
1805 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, in kvmppc_handle_exit_hv()
1806 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1811 if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR)) in kvmppc_handle_exit_hv()
1812 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1814 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1816 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1817 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1823 kvmppc_core_queue_data_storage(vcpu, in kvmppc_handle_exit_hv()
1824 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, in kvmppc_handle_exit_hv()
1825 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1834 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1835 vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) & in kvmppc_handle_exit_hv()
1837 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1844 if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1845 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1850 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1851 kvmppc_core_queue_inst_storage(vcpu, in kvmppc_handle_exit_hv()
1852 vcpu->arch.fault_dsisr | in kvmppc_handle_exit_hv()
1853 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); in kvmppc_handle_exit_hv()
1858 if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR)) in kvmppc_handle_exit_hv()
1859 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1861 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1863 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1864 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1870 kvmppc_core_queue_inst_storage(vcpu, in kvmppc_handle_exit_hv()
1871 err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); in kvmppc_handle_exit_hv()
1885 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1886 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1887 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1888 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1889 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
1890 r = kvmppc_emulate_debug_inst(vcpu); in kvmppc_handle_exit_hv()
1892 kvmppc_core_queue_program(vcpu, SRR1_PROGILL | in kvmppc_handle_exit_hv()
1893 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); in kvmppc_handle_exit_hv()
1906 r = kvmhv_p9_tm_emulation(vcpu); in kvmppc_handle_exit_hv()
1920 u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56; in kvmppc_handle_exit_hv()
1925 r = kvmppc_emulate_doorbell_instr(vcpu); in kvmppc_handle_exit_hv()
1927 r = kvmppc_pmu_unavailable(vcpu); in kvmppc_handle_exit_hv()
1929 r = kvmppc_ebb_unavailable(vcpu); in kvmppc_handle_exit_hv()
1931 r = kvmppc_tm_unavailable(vcpu); in kvmppc_handle_exit_hv()
1934 kvmppc_core_queue_program(vcpu, SRR1_PROGILL | in kvmppc_handle_exit_hv()
1935 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); in kvmppc_handle_exit_hv()
1945 kvmppc_dump_regs(vcpu); in kvmppc_handle_exit_hv()
1947 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1948 __kvmppc_get_msr_hv(vcpu)); in kvmppc_handle_exit_hv()
1949 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1957 static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) in kvmppc_handle_nested_exit() argument
1962 vcpu->stat.sum_exits++; in kvmppc_handle_nested_exit()
1972 if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) { in kvmppc_handle_nested_exit()
1975 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1976 __kvmppc_get_msr_hv(vcpu)); in kvmppc_handle_nested_exit()
1977 kvmppc_dump_regs(vcpu); in kvmppc_handle_nested_exit()
1980 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1983 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
1987 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1992 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1997 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_nested_exit()
1998 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
2015 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
2025 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
2026 r = kvmhv_nested_page_fault(vcpu); in kvmppc_handle_nested_exit()
2027 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
2030 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
2031 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
2033 if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
2034 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
2035 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
2036 r = kvmhv_nested_page_fault(vcpu); in kvmppc_handle_nested_exit()
2037 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
2048 r = kvmhv_p9_tm_emulation(vcpu); in kvmppc_handle_nested_exit()
2055 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_nested_exit()
2063 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || in kvmppc_handle_nested_exit()
2064 (vcpu->arch.nested_hfscr & (1UL << cause))) { in kvmppc_handle_nested_exit()
2066 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; in kvmppc_handle_nested_exit()
2072 r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); in kvmppc_handle_nested_exit()
2073 vcpu->arch.emul_inst = ppc_inst_val(pinst); in kvmppc_handle_nested_exit()
2086 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
2089 kvmppc_xics_rm_complete(vcpu, 0); in kvmppc_handle_nested_exit()
2093 unsigned long req = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_nested_exit()
2101 r = kvmppc_nested_h_rpt_invalidate(vcpu); in kvmppc_handle_nested_exit()
2116 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_hv() argument
2122 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2123 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
2124 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2125 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2131 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_hv() argument
2137 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
2141 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2143 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2144 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2148 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2196 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, in kvmppc_set_lpcr() argument
2199 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
2200 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2223 * MSR_LE bit in the intr_msr for each vcpu in this vcore. in kvmppc_set_lpcr()
2226 struct kvm_vcpu *vcpu; in kvmppc_set_lpcr() local
2229 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_set_lpcr()
2230 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2233 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2235 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2240 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); in kvmppc_set_lpcr()
2245 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_hv() argument
2259 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2262 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2265 *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2268 *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2271 *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2274 *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2277 *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu)); in kvmppc_get_one_reg_hv()
2281 *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i)); in kvmppc_get_one_reg_hv()
2284 *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2)); in kvmppc_get_one_reg_hv()
2287 *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu)); in kvmppc_get_one_reg_hv()
2290 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2293 *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3)); in kvmppc_get_one_reg_hv()
2297 *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i)); in kvmppc_get_one_reg_hv()
2301 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2304 *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); in kvmppc_get_one_reg_hv()
2307 *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu)); in kvmppc_get_one_reg_hv()
2310 *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0)); in kvmppc_get_one_reg_hv()
2313 *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1)); in kvmppc_get_one_reg_hv()
2316 *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2)); in kvmppc_get_one_reg_hv()
2319 *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2322 *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu)); in kvmppc_get_one_reg_hv()
2327 * we return 1 bit for each vcpu, which can come from in kvmppc_get_one_reg_hv()
2332 *val = get_reg_val(id, vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2334 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2337 *val = get_reg_val(id, kvmppc_get_vtb(vcpu)); in kvmppc_get_one_reg_hv()
2340 *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu)); in kvmppc_get_one_reg_hv()
2343 *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu)); in kvmppc_get_one_reg_hv()
2346 *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu)); in kvmppc_get_one_reg_hv()
2349 *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu)); in kvmppc_get_one_reg_hv()
2352 *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2355 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2358 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2361 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2364 *val = get_reg_val(id, kvmppc_get_pid(vcpu)); in kvmppc_get_one_reg_hv()
2367 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2370 *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu)); in kvmppc_get_one_reg_hv()
2373 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2376 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2379 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2380 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2381 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2384 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2385 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2386 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2387 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2390 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2391 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2392 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2393 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2396 *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu)); in kvmppc_get_one_reg_hv()
2400 *val = get_reg_val(id, kvmppc_get_lpcr(vcpu)); in kvmppc_get_one_reg_hv()
2403 *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2407 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2410 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2413 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2417 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2425 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2428 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2435 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2438 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2441 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2444 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2447 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2450 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2453 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2456 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2460 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2465 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2468 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2472 *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu)); in kvmppc_get_one_reg_hv()
2475 *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu)); in kvmppc_get_one_reg_hv()
2478 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2481 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2484 *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu)); in kvmppc_get_one_reg_hv()
2494 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_hv() argument
2508 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2511 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2514 kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2517 kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2520 kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2523 kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2526 kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2530 kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2533 kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2536 kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2539 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2542 kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2546 kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2550 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2553 kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2556 kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2559 kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2562 kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2565 kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2568 kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2571 kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2575 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; in kvmppc_set_one_reg_hv()
2577 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2580 kvmppc_set_vtb(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2583 kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2586 kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); in kvmppc_set_one_reg_hv()
2589 kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2592 kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); in kvmppc_set_one_reg_hv()
2595 kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2597 if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
2598 kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV); in kvmppc_set_one_reg_hv()
2601 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2604 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2607 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2610 kvmppc_set_pid(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2613 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2616 kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2619 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2622 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2627 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2628 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2630 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2636 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2638 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2645 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2648 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2659 * a migrated vcpu at least starts with an expired in kvmppc_set_one_reg_hv()
2663 kvmppc_set_tb_offset(vcpu, tb_offset); in kvmppc_set_one_reg_hv()
2664 if (!kvmppc_get_dec_expires(vcpu) && tb_offset) in kvmppc_set_one_reg_hv()
2665 kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset); in kvmppc_set_one_reg_hv()
2667 kvmppc_set_tb_offset(vcpu, tb_offset); in kvmppc_set_one_reg_hv()
2671 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); in kvmppc_set_one_reg_hv()
2674 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); in kvmppc_set_one_reg_hv()
2677 kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2681 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2684 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2687 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2691 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2699 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2702 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2708 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2711 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2714 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2717 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2720 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2723 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2726 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2729 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2733 vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2738 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2741 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2745 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2748 kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
2752 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2753 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2754 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2755 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2756 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2759 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2762 kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
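For orientation, kvmppc_get_one_reg_hv()/kvmppc_set_one_reg_hv() back the generic KVM one-reg ioctls; a minimal userspace sketch of reading one register on a powerpc build (vcpu_fd is assumed to be an already-open vcpu file descriptor, error handling omitted):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	uint64_t dscr;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DSCR,	/* routed to the DSCR case above */
		.addr = (uintptr_t)&dscr,
	};
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* KVM_SET_ONE_REG is the mirror-image call */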
2832 struct kvm_vcpu *vcpu; member
2839 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open() local
2846 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
2847 p->vcpu = vcpu; in debugfs_timings_open()
2857 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
2866 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read() local
2882 ((unsigned long)vcpu + timings[i].offset); in debugfs_timings_read()
2942 /* Create a debugfs directory for the vcpu */
2943 static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvmppc_arch_create_vcpu_debugfs_hv() argument
2946 debugfs_create_file("timings", 0444, debugfs_dentry, vcpu, in kvmppc_arch_create_vcpu_debugfs_hv()
2952 static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvmppc_arch_create_vcpu_debugfs_hv() argument
2958 static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_create_hv() argument
2966 kvm = vcpu->kvm; in kvmppc_core_vcpu_create_hv()
2967 id = vcpu->vcpu_id; in kvmppc_core_vcpu_create_hv()
2969 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2976 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2978 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2983 err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_create_hv()
2988 kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC); in kvmppc_core_vcpu_create_hv()
2990 kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT); in kvmppc_core_vcpu_create_hv()
2991 kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE); in kvmppc_core_vcpu_create_hv()
2994 kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH); in kvmppc_core_vcpu_create_hv()
2996 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); in kvmppc_core_vcpu_create_hv()
2997 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2998 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2999 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
3000 __kvmppc_set_msr_hv(vcpu, MSR_ME); in kvmppc_core_vcpu_create_hv()
3001 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
3010 kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
3015 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX); in kvmppc_core_vcpu_create_hv()
3018 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR)); in kvmppc_core_vcpu_create_hv()
3022 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM); in kvmppc_core_vcpu_create_hv()
3026 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
3028 vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu); in kvmppc_core_vcpu_create_hv()
3033 kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM)); in kvmppc_core_vcpu_create_hv()
3035 kvmppc_mmu_book3s_hv_init(vcpu); in kvmppc_core_vcpu_create_hv()
3037 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
3039 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
3046 pr_devel("KVM: VCPU ID too high\n"); in kvmppc_core_vcpu_create_hv()
3082 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
3083 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
3084 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
3085 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
3087 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
3088 kvmppc_sanity_check(vcpu); in kvmppc_core_vcpu_create_hv()
3113 * so each vcpu gets its own vcore. in kvmhv_set_smt_mode()
3137 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_hv() argument
3139 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3140 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
3141 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
3142 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
3143 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3145 kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_free_hv()
3148 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_hv() argument
3154 static void kvmppc_set_timer(struct kvm_vcpu *vcpu) in kvmppc_set_timer() argument
3159 if (now > kvmppc_dec_expires_host_tb(vcpu)) { in kvmppc_set_timer()
3161 kvmppc_core_queue_dec(vcpu); in kvmppc_set_timer()
3162 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_set_timer()
3165 dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); in kvmppc_set_timer()
3166 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
3167 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
3173 struct kvm_vcpu *vcpu, u64 tb) in kvmppc_remove_runnable() argument
3177 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
3179 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3181 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
3182 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
3183 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
3184 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
3185 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3187 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
3237 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) in radix_flush_cpu() argument
3239 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
3273 struct kvm_vcpu *vcpu = arg; in do_migrate_away_vcpu() local
3274 struct kvm *kvm = vcpu->kvm; in do_migrate_away_vcpu()
3290 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) in kvmppc_prepare_radix_vcpu() argument
3292 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
3293 struct kvm *kvm = vcpu->kvm; in kvmppc_prepare_radix_vcpu()
3300 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
3302 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3308 * used on one vcpu. However, that doesn't mean it has in kvmppc_prepare_radix_vcpu()
3311 * a vcpu moves from one pcpu to another, we need to tell in kvmppc_prepare_radix_vcpu()
3312 * any vcpus running on the same core as this vcpu previously in kvmppc_prepare_radix_vcpu()
3319 radix_flush_cpu(kvm, prev_cpu, vcpu); in kvmppc_prepare_radix_vcpu()
3322 do_migrate_away_vcpu, vcpu, 1); in kvmppc_prepare_radix_vcpu()
3325 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3327 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3331 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) in kvmppc_start_thread() argument
3337 if (vcpu) { in kvmppc_start_thread()
3338 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3339 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3340 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3342 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3343 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
3344 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3347 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
3420 * These are vcores that could run but their runner VCPU tasks are
3581 struct kvm_vcpu *vcpu; in prepare_threads() local
3583 for_each_runnable_thread(i, vcpu, vc) { in prepare_threads()
3584 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3585 vcpu->arch.ret = -EINTR; in prepare_threads()
3586 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3587 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3588 vcpu->arch.dtl.update_pending) in prepare_threads()
3589 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3592 kvmppc_remove_runnable(vc, vcpu, mftb()); in prepare_threads()
3593 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3631 struct kvm_vcpu *vcpu; in recheck_signals_and_mmu() local
3638 for_each_runnable_thread(i, vcpu, vc) in recheck_signals_and_mmu()
3639 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3650 struct kvm_vcpu *vcpu; in post_guest_process() local
3654 for_each_runnable_thread(i, vcpu, vc) { in post_guest_process()
3658 * the vcpu, and the vcore state is VCORE_EXITING here, in post_guest_process()
3664 if (now < kvmppc_dec_expires_host_tb(vcpu) && in post_guest_process()
3665 kvmppc_core_pending_dec(vcpu)) in post_guest_process()
3666 kvmppc_core_dequeue_dec(vcpu); in post_guest_process()
3668 trace_kvm_guest_exit(vcpu); in post_guest_process()
3671 if (vcpu->arch.trap) in post_guest_process()
3672 ret = kvmppc_handle_exit_hv(vcpu, in post_guest_process()
3673 vcpu->arch.run_task); in post_guest_process()
3675 vcpu->arch.ret = ret; in post_guest_process()
3676 vcpu->arch.trap = 0; in post_guest_process()
3679 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3680 if (vcpu->arch.pending_exceptions) in post_guest_process()
3681 kvmppc_core_prepare_to_enter(vcpu); in post_guest_process()
3682 if (vcpu->arch.ceded) in post_guest_process()
3683 kvmppc_set_timer(vcpu); in post_guest_process()
3687 kvmppc_remove_runnable(vc, vcpu, mftb()); in post_guest_process()
3688 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3703 vcpu = next_runnable_thread(vc, &i); in post_guest_process()
3704 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3776 struct kvm_vcpu *vcpu; in kvmppc_run_core() local
3825 for_each_runnable_thread(i, vcpu, vc) { in kvmppc_run_core()
3826 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3827 kvmppc_remove_runnable(vc, vcpu, mftb()); in kvmppc_run_core()
3828 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3957 for_each_runnable_thread(i, vcpu, pvc) { in kvmppc_run_core()
3960 * It updates vcpu->cpu and vcpu->arch.thread_cpu in kvmppc_run_core()
3966 kvmppc_start_thread(vcpu, pvc); in kvmppc_run_core()
3967 kvmppc_update_vpa_dispatch(vcpu, pvc); in kvmppc_run_core()
3968 trace_kvm_guest_enter(vcpu); in kvmppc_run_core()
3969 if (!vcpu->arch.ptid) in kvmppc_run_core()
3971 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3975 * even if it doesn't have a vcpu. in kvmppc_run_core()
4024 /* prevent other vcpu threads from doing kvmppc_start_thread() now */ in kvmppc_run_core()
4077 /* make sure updates to secondary vcpu structs are visible now */ in kvmppc_run_core()
4100 static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) in vcpu_vpa_increment_dispatch() argument
4102 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in vcpu_vpa_increment_dispatch()
4106 vcpu->arch.vpa.dirty = 1; in vcpu_vpa_increment_dispatch()
4110 static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, in kvmhv_vcpu_entry_nestedv2() argument
4118 io = &vcpu->arch.nestedv2_io; in kvmhv_vcpu_entry_nestedv2()
4121 kvmppc_msr_hard_disable_set_facilities(vcpu, msr); in kvmhv_vcpu_entry_nestedv2()
4125 rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit); in kvmhv_vcpu_entry_nestedv2()
4131 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_nestedv2()
4132 rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, in kvmhv_vcpu_entry_nestedv2()
4136 pr_err("KVM Guest Run VCPU hcall failed\n"); in kvmhv_vcpu_entry_nestedv2()
4138 pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i); in kvmhv_vcpu_entry_nestedv2()
4140 pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i); in kvmhv_vcpu_entry_nestedv2()
4142 pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i); in kvmhv_vcpu_entry_nestedv2()
4145 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_nestedv2()
4152 rc = kvmhv_nestedv2_parse_output(vcpu); in kvmhv_vcpu_entry_nestedv2()
4162 static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u6… in kvmhv_vcpu_entry_p9_nested() argument
4183 kvmppc_msr_hard_disable_set_facilities(vcpu, msr); in kvmhv_vcpu_entry_p9_nested()
4187 if (unlikely(load_vcpu_state(vcpu, &host_os_sprs))) in kvmhv_vcpu_entry_p9_nested()
4190 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4191 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_vcpu_entry_p9_nested()
4193 kvmhv_save_hv_regs(vcpu, &hvregs); in kvmhv_vcpu_entry_p9_nested()
4196 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_vcpu_entry_p9_nested()
4198 if (vcpu->arch.nested) { in kvmhv_vcpu_entry_p9_nested()
4199 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_vcpu_entry_p9_nested()
4200 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4202 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_vcpu_entry_p9_nested()
4203 hvregs.vcpu_token = vcpu->vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4221 mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); in kvmhv_vcpu_entry_p9_nested()
4223 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_vcpu_entry_p9_nested()
4224 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_vcpu_entry_p9_nested()
4225 switch_pmu_to_guest(vcpu, &host_os_sprs); in kvmhv_vcpu_entry_p9_nested()
4226 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_p9_nested()
4228 __pa(&vcpu->arch.regs)); in kvmhv_vcpu_entry_p9_nested()
4229 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_p9_nested()
4230 kvmhv_restore_hv_return_state(vcpu, &hvregs); in kvmhv_vcpu_entry_p9_nested()
4231 switch_pmu_to_host(vcpu, &host_os_sprs); in kvmhv_vcpu_entry_p9_nested()
4232 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_vcpu_entry_p9_nested()
4233 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_vcpu_entry_p9_nested()
4234 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_vcpu_entry_p9_nested()
4235 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_vcpu_entry_p9_nested()
4237 store_vcpu_state(vcpu); in kvmhv_vcpu_entry_p9_nested()
4243 vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu)); in kvmhv_vcpu_entry_p9_nested()
4247 restore_p9_host_os_sprs(vcpu, &host_os_sprs); in kvmhv_vcpu_entry_p9_nested()
4248 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
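The PSSCR lines in this function follow a "write only if different" bracket around the guest run: set the guest value when it differs from the host's, capture whatever the guest left in the register on exit, and restore the host value only when needed. A minimal model with a fake register; the helpers are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t toy_spr;                          /* stands in for one SPR */
    static uint64_t rd_spr(void) { return toy_spr; }
    static void wr_spr(uint64_t v) { toy_spr = v; }

    static void run_guest_once(uint64_t *guest_val, uint64_t host_val)
    {
            if (*guest_val != host_val)       /* skip the write when values already match */
                    wr_spr(*guest_val);

            /* ... the guest would run here and may change the register ... */

            *guest_val = rd_spr();            /* capture what the guest left behind */
            if (*guest_val != host_val)
                    wr_spr(host_val);         /* restore the host value only if needed */
    }

    int main(void)
    {
            uint64_t host = 0x10, guest = 0x20;
            wr_spr(host);
            run_guest_once(&guest, host);
            printf("guest=%#llx spr=%#llx\n",
                   (unsigned long long)guest, (unsigned long long)rd_spr());
            return 0;
    }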
4257 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, in kvmhv_p9_guest_entry() argument
4260 struct kvm *kvm = vcpu->kvm; in kvmhv_p9_guest_entry()
4261 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_p9_guest_entry()
4273 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4275 vcpu_vpa_increment_dispatch(vcpu); in kvmhv_p9_guest_entry()
4279 trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4281 trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4285 kvmppc_get_gpr(vcpu, 3) == H_CEDE) { in kvmhv_p9_guest_entry()
4286 kvmppc_cede(vcpu); in kvmhv_p9_guest_entry()
4287 kvmppc_set_gpr(vcpu, 3, 0); in kvmhv_p9_guest_entry()
4293 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4297 kvmppc_xive_push_vcpu(vcpu); in kvmhv_p9_guest_entry()
4300 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4304 !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { in kvmhv_p9_guest_entry()
4305 unsigned long req = kvmppc_get_gpr(vcpu, 3); in kvmhv_p9_guest_entry()
4314 kvmppc_cede(vcpu); in kvmhv_p9_guest_entry()
4315 if (!kvmppc_xive_rearm_escalation(vcpu)) { in kvmhv_p9_guest_entry()
4320 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4322 kvmppc_set_gpr(vcpu, 3, 0); in kvmhv_p9_guest_entry()
4330 if (!kvmppc_xive_rearm_escalation(vcpu)) { in kvmhv_p9_guest_entry()
4335 kvmppc_set_gpr(vcpu, 3, 0); in kvmhv_p9_guest_entry()
4342 ret = kvmppc_xive_xics_hcall(vcpu, req); in kvmhv_p9_guest_entry()
4344 kvmppc_set_gpr(vcpu, 3, ret); in kvmhv_p9_guest_entry()
4349 kvmppc_xive_pull_vcpu(vcpu); in kvmhv_p9_guest_entry()
4352 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4355 vcpu_vpa_increment_dispatch(vcpu); in kvmhv_p9_guest_entry()
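kvmhv_p9_guest_entry() services a few hcalls on the exit path itself: a privileged syscall exit whose r3 names H_CEDE or one of the XICS hcalls is handled in the kernel, r3 is rewritten with the return value, and the vcpu can simply re-enter the guest. A sketch of that dispatch; the TOY_* numbers are placeholders, not the real H_* constants:

    #include <stdio.h>

    #define TOY_H_CEDE 0x01                 /* placeholder hcall numbers */
    #define TOY_H_XIRR 0x02

    struct toy_regs { unsigned long gpr3; int ceded; };

    static int handled_in_kernel(struct toy_regs *r)
    {
            switch (r->gpr3) {
            case TOY_H_CEDE:
                    r->ceded = 1;           /* nap until an interrupt arrives */
                    r->gpr3 = 0;            /* success status back to the guest */
                    return 1;
            case TOY_H_XIRR:
                    r->gpr3 = 0;            /* pretend the irq controller answered */
                    return 1;
            default:
                    return 0;               /* leave it for the slow path / userspace */
            }
    }

    int main(void)
    {
            struct toy_regs regs = { .gpr3 = TOY_H_CEDE };
            printf("fast path handled=%d ceded=%d\n", handled_in_kernel(&regs), regs.ceded);
            return 0;
    }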
4361 * Wait for some other vcpu thread to execute us, and
4365 struct kvm_vcpu *vcpu, int wait_state) in kvmppc_wait_for_exec() argument
4369 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4370 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4375 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
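kvmppc_wait_for_exec() is the standard "register on the wait queue, re-check the condition, then sleep" sequence that avoids missed wakeups. A pthread analogue of both sides of that handshake; the state names are stand-ins for the KVMPPC_VCPU_* states, not the kernel types:

    #include <pthread.h>

    enum toy_state { TOY_RUNNABLE, TOY_BUSY_IN_HOST, TOY_NOTREADY };

    struct toy_vcpu {
            pthread_mutex_t lock;
            pthread_cond_t  cpu_run;        /* plays the role of arch.cpu_run */
            enum toy_state  state;
    };

    /* Sleep until another task has run this vcpu and moved it out of RUNNABLE. */
    static void toy_wait_for_exec(struct toy_vcpu *v)
    {
            pthread_mutex_lock(&v->lock);
            while (v->state == TOY_RUNNABLE)        /* condition re-checked under the lock */
                    pthread_cond_wait(&v->cpu_run, &v->lock);
            pthread_mutex_unlock(&v->lock);
    }

    /* The runner side: change the state, then wake the sleeping task. */
    static void toy_finish_exec(struct toy_vcpu *v, enum toy_state s)
    {
            pthread_mutex_lock(&v->lock);
            v->state = s;
            pthread_cond_broadcast(&v->cpu_run);
            pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
            struct toy_vcpu v = {
                    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, TOY_RUNNABLE
            };
            toy_finish_exec(&v, TOY_BUSY_IN_HOST);
            toy_wait_for_exec(&v);          /* returns at once: state already changed */
            return 0;
    }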
4397 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) in xive_interrupt_pending() argument
4401 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4402 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4405 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) in xive_interrupt_pending() argument
4411 static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) in kvmppc_vcpu_woken() argument
4413 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4414 kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu)) in kvmppc_vcpu_woken()
4420 static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu) in kvmppc_vcpu_check_block() argument
4422 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcpu_check_block()
4433 struct kvm_vcpu *vcpu; in kvmppc_vcore_check_block() local
4436 for_each_runnable_thread(i, vcpu, vc) { in kvmppc_vcore_check_block()
4437 if (kvmppc_vcpu_check_block(vcpu)) in kvmppc_vcore_check_block()
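The *_check_block() helpers compose two predicates: a vcpu may stay blocked only while it is ceded and nothing counts as a wakeup (pending exception, prod, doorbell, XIVE interrupt), and the vcore may block only if that holds for every runnable vcpu. A condensed model with illustrative fields; note the kernel helpers return the inverted "must not block" sense:

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_vcpu {
            bool ceded, pending_exceptions, prodded, doorbell, xive_irq;
    };

    static bool vcpu_woken(const struct toy_vcpu *v)
    {
            return v->pending_exceptions || v->prodded || v->doorbell || v->xive_irq;
    }

    static bool vcpu_can_block(const struct toy_vcpu *v)
    {
            return v->ceded && !vcpu_woken(v);
    }

    /* The whole vcore may block only if every runnable vcpu can block. */
    static bool vcore_can_block(const struct toy_vcpu *vcpus, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    if (!vcpu_can_block(&vcpus[i]))
                            return false;
            return true;
    }

    int main(void)
    {
            struct toy_vcpu threads[2] = {
                    { .ceded = true },                      /* may block */
                    { .ceded = true, .doorbell = true },    /* woken: must not block */
            };
            return vcore_can_block(threads, 2) ? 1 : 0;     /* expect "cannot block" */
    }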
4562 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) in kvmhv_setup_mmu() argument
4565 struct kvm *kvm = vcpu->kvm; in kvmhv_setup_mmu()
4570 r = kvmppc_hv_setup_htab_rma(vcpu); in kvmhv_setup_mmu()
4581 static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) in kvmppc_run_vcpu() argument
4583 struct kvm_run *run = vcpu->run; in kvmppc_run_vcpu()
4588 trace_kvmppc_run_vcpu_enter(vcpu); in kvmppc_run_vcpu()
4591 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4592 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4593 kvmppc_update_vpas(vcpu); in kvmppc_run_vcpu()
4598 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4600 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4601 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4602 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4603 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4604 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4605 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4609 * This happens the first time this is called for a vcpu. in kvmppc_run_vcpu()
4617 kvmppc_update_vpa_dispatch(vcpu, vc); in kvmppc_run_vcpu()
4618 kvmppc_start_thread(vcpu, vc); in kvmppc_run_vcpu()
4619 trace_kvm_guest_enter(vcpu); in kvmppc_run_vcpu()
4626 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4629 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4631 r = kvmhv_setup_mmu(vcpu); in kvmppc_run_vcpu()
4637 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4646 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); in kvmppc_run_vcpu()
4659 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4668 vc->runner = vcpu; in kvmppc_run_vcpu()
4683 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4687 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); in kvmppc_run_vcpu()
4692 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4693 kvmppc_remove_runnable(vc, vcpu, mftb()); in kvmppc_run_vcpu()
4694 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
4696 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4700 /* Wake up some vcpu to run the core */ in kvmppc_run_vcpu()
4706 trace_kvmppc_run_vcpu_exit(vcpu); in kvmppc_run_vcpu()
4708 return vcpu->arch.ret; in kvmppc_run_vcpu()
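kvmppc_run_vcpu() is the runner handoff: a task whose vcpu became runnable either waits for the current runner to execute it or, when nobody is running the vcore, claims the runner role and runs the whole core itself. A stripped-down, single-threaded model of that choice only (no locking, no wait queues), purely to show the control flow:

    #include <stdio.h>

    enum vcore_state { TOY_INACTIVE, TOY_RUNNING };

    struct toy_vcore { enum vcore_state state; int runner; };

    static void run_whole_core(struct toy_vcore *vc)
    {
            (void)vc;       /* would run every runnable vcpu of the vcore here */
    }

    static void run_vcpu(struct toy_vcore *vc, int my_id)
    {
            if (vc->state == TOY_RUNNING && vc->runner != my_id) {
                    printf("vcpu %d: core already has a runner, just wait\n", my_id);
                    return;
            }
            vc->runner = my_id;             /* claim the runner role */
            vc->state = TOY_RUNNING;
            run_whole_core(vc);
            vc->state = TOY_INACTIVE;
            printf("vcpu %d: acted as runner for the vcore\n", my_id);
    }

    int main(void)
    {
            struct toy_vcore vc = { TOY_INACTIVE, -1 };
            run_vcpu(&vc, 0);               /* becomes the runner */
            vc.state = TOY_RUNNING;         /* pretend vcpu 0 is still mid-run */
            run_vcpu(&vc, 1);               /* waits instead */
            return 0;
    }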
4711 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, in kvmhv_run_single_vcpu() argument
4714 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); in kvmhv_run_single_vcpu()
4715 struct kvm_run *run = vcpu->run; in kvmhv_run_single_vcpu()
4719 struct kvm *kvm = vcpu->kvm; in kvmhv_run_single_vcpu()
4720 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4724 trace_kvmppc_run_vcpu_enter(vcpu); in kvmhv_run_single_vcpu()
4727 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4728 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4730 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4731 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4732 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4733 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4737 r = kvmhv_setup_mmu(vcpu); in kvmhv_run_single_vcpu()
4741 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4749 kvmppc_update_vpas(vcpu); in kvmhv_run_single_vcpu()
4754 kvmppc_prepare_radix_vcpu(vcpu, pcpu); in kvmhv_run_single_vcpu()
4759 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4766 vcpu->cpu = pcpu; in kvmhv_run_single_vcpu()
4767 vcpu->arch.thread_cpu = pcpu; in kvmhv_run_single_vcpu()
4769 local_paca->kvm_hstate.kvm_vcpu = vcpu; in kvmhv_run_single_vcpu()
4777 * kick a vCPU to notice the pending interrupt. in kvmhv_run_single_vcpu()
4782 kvmppc_core_prepare_to_enter(vcpu); in kvmhv_run_single_vcpu()
4784 &vcpu->arch.pending_exceptions) || in kvmhv_run_single_vcpu()
4785 xive_interrupt_pending(vcpu)) { in kvmhv_run_single_vcpu()
4791 if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE)) in kvmhv_run_single_vcpu()
4792 kvmppc_inject_interrupt_hv(vcpu, in kvmhv_run_single_vcpu()
4797 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4798 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4799 xive_interrupt_pending(vcpu)) { in kvmhv_run_single_vcpu()
4800 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4804 if (vcpu->arch.timer_running) { in kvmhv_run_single_vcpu()
4805 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmhv_run_single_vcpu()
4806 vcpu->arch.timer_running = 0; in kvmhv_run_single_vcpu()
4811 kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu)); in kvmhv_run_single_vcpu()
4813 trace_kvm_guest_enter(vcpu); in kvmhv_run_single_vcpu()
4822 trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); in kvmhv_run_single_vcpu()
4823 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4832 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
4833 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4834 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
4859 if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) && in kvmhv_run_single_vcpu()
4860 ((tb < kvmppc_dec_expires_host_tb(vcpu)) || in kvmhv_run_single_vcpu()
4862 kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) in kvmhv_run_single_vcpu()
4863 kvmppc_core_dequeue_dec(vcpu); in kvmhv_run_single_vcpu()
4865 trace_kvm_guest_exit(vcpu); in kvmhv_run_single_vcpu()
4869 r = kvmppc_handle_exit_hv(vcpu, current); in kvmhv_run_single_vcpu()
4871 r = kvmppc_handle_nested_exit(vcpu); in kvmhv_run_single_vcpu()
4873 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4875 if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) { in kvmhv_run_single_vcpu()
4876 kvmppc_set_timer(vcpu); in kvmhv_run_single_vcpu()
4882 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4884 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4888 if (kvmppc_vcpu_check_block(vcpu)) in kvmhv_run_single_vcpu()
4891 trace_kvmppc_vcore_blocked(vcpu, 0); in kvmhv_run_single_vcpu()
4893 trace_kvmppc_vcore_blocked(vcpu, 1); in kvmhv_run_single_vcpu()
4897 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4900 trace_kvmppc_run_vcpu_exit(vcpu); in kvmhv_run_single_vcpu()
4902 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4905 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4907 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4909 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
4910 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4911 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
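kvmhv_run_single_vcpu() brackets the actual guest entry with bookkeeping that remote kicks depend on: publish which physical CPU the vcpu runs on and mark it runnable before entry, then clear both and fall back to BUSY_IN_HOST afterwards (the same cleanup runs on the signal_pending error path). A toy version of that bracket, with hypothetical types:

    #include <stdio.h>

    enum toy_state { TOY_RUNNABLE, TOY_BUSY_IN_HOST };

    struct toy_vcpu { int cpu, thread_cpu; enum toy_state state; };

    static int enter_guest(void) { return 0x500; }  /* pretend: external-interrupt exit */

    static int run_on(struct toy_vcpu *v, int pcpu)
    {
            v->cpu = pcpu;                  /* visible to remote kick/IPI senders */
            v->thread_cpu = pcpu;
            v->state = TOY_RUNNABLE;

            int trap = enter_guest();

            v->cpu = -1;                    /* not running anywhere any more */
            v->thread_cpu = -1;
            v->state = TOY_BUSY_IN_HOST;
            return trap;
    }

    int main(void)
    {
            struct toy_vcpu v = { -1, -1, TOY_BUSY_IN_HOST };
            printf("trap=%#x\n", run_on(&v, 4));
            return 0;
    }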
4917 static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_hv() argument
4919 struct kvm_run *run = vcpu->run; in kvmppc_vcpu_run_hv()
4925 start_timing(vcpu, &vcpu->arch.vcpu_entry); in kvmppc_vcpu_run_hv()
4927 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4957 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4958 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4959 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4962 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
4964 kvm = vcpu->kvm; in kvmppc_vcpu_run_hv()
4978 (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM)) in kvmppc_vcpu_run_hv()
4987 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4988 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4989 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4992 accumulate_time(vcpu, &vcpu->arch.guest_entry); in kvmppc_vcpu_run_hv()
4994 r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, in kvmppc_vcpu_run_hv()
4995 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4997 r = kvmppc_run_vcpu(vcpu); in kvmppc_vcpu_run_hv()
5000 accumulate_time(vcpu, &vcpu->arch.hcall); in kvmppc_vcpu_run_hv()
5002 if (!kvmhv_is_nestedv2() && WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { in kvmppc_vcpu_run_hv()
5012 trace_kvm_hcall_enter(vcpu); in kvmppc_vcpu_run_hv()
5013 r = kvmppc_pseries_do_hcall(vcpu); in kvmppc_vcpu_run_hv()
5014 trace_kvm_hcall_exit(vcpu, r); in kvmppc_vcpu_run_hv()
5015 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
5017 accumulate_time(vcpu, &vcpu->arch.pg_fault); in kvmppc_vcpu_run_hv()
5019 r = kvmppc_book3s_hv_page_fault(vcpu, in kvmppc_vcpu_run_hv()
5020 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
5026 r = kvmppc_xics_rm_complete(vcpu, 0); in kvmppc_vcpu_run_hv()
5029 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); in kvmppc_vcpu_run_hv()
5031 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
5036 end_timing(vcpu); in kvmppc_vcpu_run_hv()
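The tail of kvmppc_vcpu_run_hv() is a run loop: keep re-entering the guest while each exit can be resolved in the kernel (pSeries hcall emulation, HV page fault, XICS completion) and only return to userspace when the exit demands it. A skeleton of that loop with a fake exit generator in place of the real entry path:

    #include <stdio.h>

    enum toy_exit { TOY_HCALL, TOY_PAGE_FAULT, TOY_TO_USER };

    static enum toy_exit toy_run_once(int step)
    {
            /* pretend the first exits are fixable in the kernel, then bail out */
            return step == 0 ? TOY_HCALL : step == 1 ? TOY_PAGE_FAULT : TOY_TO_USER;
    }

    int main(void)
    {
            int step = 0;
            enum toy_exit r;

            do {
                    r = toy_run_once(step++);
                    if (r == TOY_HCALL)
                            printf("emulate hcall, then re-enter\n");
                    else if (r == TOY_PAGE_FAULT)
                            printf("resolve HV page fault, then re-enter\n");
            } while (r != TOY_TO_USER);

            printf("back to userspace after %d exits\n", step);
            return 0;
    }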
5102 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_dirty_log_hv() local
5143 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vm_ioctl_get_dirty_log_hv()
5144 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5145 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5146 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5147 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5269 struct kvm_vcpu *vcpu; in kvmppc_update_lpcr() local
5271 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_update_lpcr()
5272 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR); in kvmppc_update_lpcr()
5302 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) in kvmppc_hv_setup_htab_rma() argument
5305 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
5366 kvmppc_map_vrma(vcpu, memslot, porder); in kvmppc_hv_setup_htab_rma()
5755 static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, in kvmppc_core_emulate_op_hv() argument
5761 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mtspr_hv() argument
5767 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mfspr_hv() argument
6170 static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, in kvmhv_load_from_eaddr() argument
6175 if (kvmhv_vcpu_is_radix(vcpu)) { in kvmhv_load_from_eaddr()
6176 rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size); in kvmhv_load_from_eaddr()
6183 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
6189 static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, in kvmhv_store_to_eaddr() argument
6194 if (kvmhv_vcpu_is_radix(vcpu)) { in kvmhv_store_to_eaddr()
6195 rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size); in kvmhv_store_to_eaddr()
6202 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
6242 struct kvm_vcpu *vcpu; in kvmhv_svm_off() local
6296 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmhv_svm_off()
6297 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6298 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
6299 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
6300 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
6301 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()