Lines Matching full:vcpu
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
57 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
96 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
101 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
104 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
109 * to the page tables done while the VCPU is running. in kvmppc_prepare_to_enter()
114 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
117 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
118 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
125 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
164 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
189 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
240 kvm_vcpu_halt(vcpu); in kvmppc_kvm_pv()
247 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
287 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
321 if (vcpu->mmio_is_write) in kvmppc_emulate_mmio()
324 kvmppc_core_queue_data_storage(vcpu, in kvmppc_emulate_mmio()
325 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, in kvmppc_emulate_mmio()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
333 kvmppc_core_queue_program(vcpu, 0); in kvmppc_emulate_mmio()
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
355 vcpu->stat.st++; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
364 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
375 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
377 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
398 vcpu->stat.ld++; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
407 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
421 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
423 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
430 kvm_vcpu_srcu_read_lock(vcpu); in kvmppc_ld()
431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
432 kvm_vcpu_srcu_read_unlock(vcpu); in kvmppc_ld()
760 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
762 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
763 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
768 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
772 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
773 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
776 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
778 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
782 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
786 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
787 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
791 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
795 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
799 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
801 /* Make sure we're not using the vcpu anymore */ in kvm_arch_vcpu_destroy()
802 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
804 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
806 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
810 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
812 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
815 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
819 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
821 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
824 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
826 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
829 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
839 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
841 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
844 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
846 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
848 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
920 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
924 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
925 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
931 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
933 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
935 kvmppc_set_vsx_fpr(vcpu, index, offset, gpr); in kvmppc_set_vsr_dword()
939 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
943 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
946 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
949 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
951 kvmppc_set_vsx_fpr(vcpu, index, 0, gpr); in kvmppc_set_vsr_dword_dump()
952 kvmppc_set_vsx_fpr(vcpu, index, 1, gpr); in kvmppc_set_vsr_dword_dump()
956 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
960 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
967 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word_dump()
971 kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]); in kvmppc_set_vsr_word_dump()
972 kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]); in kvmppc_set_vsr_word_dump()
976 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
980 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
981 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
988 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
990 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
994 val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
996 kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]); in kvmppc_set_vsr_word()
1002 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
1011 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
1019 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
1022 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
1025 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1028 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1031 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1034 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1037 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1040 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
1044 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1048 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1049 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1050 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1055 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_dword()
1057 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_dword()
1060 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1064 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1065 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1066 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1071 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_word()
1073 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_word()
1076 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1080 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1081 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1082 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1087 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_hword()
1089 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_hword()
1092 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1096 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1097 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1098 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1103 kvmppc_get_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_byte()
1105 kvmppc_set_vsx_vr(vcpu, index, &val.vval); in kvmppc_set_vmx_byte()
1139 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1141 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1147 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1164 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1167 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1183 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1185 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1188 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1189 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1191 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1195 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1198 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1199 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1204 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1205 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1207 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1208 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1209 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1210 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1211 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1213 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1214 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1216 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1221 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1222 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1224 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1225 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1226 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1227 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1228 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1230 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1231 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1233 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1238 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1240 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1249 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1253 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1258 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1267 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1271 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1272 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1273 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1274 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1275 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1277 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1279 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1282 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1285 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1286 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1293 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1297 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1302 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1306 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1310 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1317 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1320 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1321 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1327 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1329 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1330 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1336 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1339 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1345 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1354 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1357 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1358 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1360 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1380 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1382 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1385 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1388 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1397 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1402 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1408 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1416 *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1418 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1425 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1435 reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1438 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1451 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1457 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1460 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1463 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1464 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1467 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1473 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1475 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1476 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1482 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1484 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1488 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1490 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1491 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1492 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1494 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1495 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1518 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1523 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1526 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1527 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1533 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1534 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1535 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1541 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1548 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1553 kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_dword()
1559 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1566 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1571 kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_word()
1577 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1584 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1589 kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_hword()
1595 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1602 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1607 kvmppc_get_vsx_vr(vcpu, index, &reg.vval); in kvmppc_get_vmx_byte()
1613 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1620 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1623 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1625 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1626 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1628 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1633 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1637 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1641 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1648 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1653 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1654 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1655 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1661 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1663 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1667 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1669 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1670 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1671 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1673 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1674 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1696 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1706 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1716 kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_get_one_reg()
1723 val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1726 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1744 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1757 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1767 kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_set_one_reg()
1774 kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
1781 kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
1793 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1795 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1798 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1800 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1801 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1802 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1803 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1805 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1806 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1807 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1810 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1811 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1813 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1819 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1820 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1821 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1824 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1825 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1827 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1832 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1837 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1838 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1839 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1842 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1844 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1845 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1847 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1848 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1849 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1853 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1858 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1860 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1873 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
1877 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1880 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1884 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1886 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
1891 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1902 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1906 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1911 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1913 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1918 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1930 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1947 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1967 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1969 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1993 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
2003 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
2006 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2015 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
2033 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2039 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2048 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
2055 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
2063 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2074 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2075 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2076 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2088 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2090 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2100 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2101 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2102 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2114 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2535 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvm_arch_create_vcpu_debugfs() argument
2537 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2538 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()