Lines Matching full:vcpu

19 STATS_DESC_COUNTER(VCPU, int_exits),
20 STATS_DESC_COUNTER(VCPU, idle_exits),
21 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 STATS_DESC_COUNTER(VCPU, signal_exits),
23 STATS_DESC_COUNTER(VCPU, hypercall_exits)
35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) in kvm_save_host_pmu() argument
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) in kvm_restore_host_pmu() argument
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) in kvm_save_guest_pmu() argument
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) in kvm_restore_guest_pmu() argument
82 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
94 static int kvm_own_pmu(struct kvm_vcpu *vcpu) in kvm_own_pmu() argument
98 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
101 kvm_save_host_pmu(vcpu); in kvm_own_pmu()
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
108 kvm_restore_guest_pmu(vcpu); in kvm_own_pmu()
113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu) in kvm_lose_pmu() argument
116 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
121 kvm_save_guest_pmu(vcpu); in kvm_lose_pmu()
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
138 kvm_restore_host_pmu(vcpu); in kvm_lose_pmu()
141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu) in kvm_restore_pmu() argument
143 if (vcpu->arch.aux_inuse & KVM_LARCH_PMU) in kvm_restore_pmu()
144 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_restore_pmu()
147 static void kvm_check_pmu(struct kvm_vcpu *vcpu) in kvm_check_pmu() argument
149 if (kvm_check_request(KVM_REQ_PMU, vcpu)) { in kvm_check_pmu()
150 kvm_own_pmu(vcpu); in kvm_check_pmu()
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
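The PMU fragments above (kvm_restore_pmu, kvm_check_pmu) implement a deferred-restore pattern: vcpu_load only raises KVM_REQ_PMU, and the request is serviced by kvm_own_pmu() right before guest entry, with interrupts disabled. A minimal user-space sketch of that pattern — the toy_* names are hypothetical, not the kernel API:

#include <stdio.h>

#define REQ_PMU		(1u << 0)
#define AUX_PMU_OWNED	(1u << 0)

struct toy_vcpu {
	unsigned int requests;   /* pending work, serviced right before guest entry */
	unsigned int aux_inuse;  /* lazily switched units the guest currently owns */
};

static void toy_restore_pmu(struct toy_vcpu *v)
{
	/* vcpu_load path: don't touch hardware yet, just record the need */
	if (v->aux_inuse & AUX_PMU_OWNED)
		v->requests |= REQ_PMU;
}

static void toy_check_pmu(struct toy_vcpu *v)
{
	/* guest-entry path: service the request exactly once */
	if (v->requests & REQ_PMU) {
		v->requests &= ~REQ_PMU;
		printf("save host PMU, restore guest PMU\n");
		v->aux_inuse |= AUX_PMU_OWNED;
	}
}

int main(void)
{
	struct toy_vcpu v = { .aux_inuse = AUX_PMU_OWNED };

	toy_restore_pmu(&v);	/* vcpu_load: mark */
	toy_check_pmu(&v);	/* vcpu_run: act  */
	return 0;
}

The split keeps the hardware save/restore off the vcpu_load path: if no guest entry follows, no PMU state is moved at all.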
155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) in kvm_update_stolen_time() argument
164 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
165 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
170 slots = kvm_memslots(vcpu->kvm); in kvm_update_stolen_time()
172 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { in kvm_update_stolen_time()
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
189 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
196 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_update_stolen_time()
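The steal-time update above is delta accounting: current->sched_info.run_delay is the total time this vCPU thread has sat runnable without running, and each update adds the growth since last_steal to the guest-visible counter. A sketch of just that arithmetic, with hypothetical toy_* names and the guest-memory write, version protocol, and dirty marking omitted:

#include <stdint.h>
#include <stdio.h>

struct toy_steal {
	uint64_t last_steal;	/* run_delay sampled at the previous update */
	uint64_t steal;		/* guest-visible accumulated steal time */
};

/* run_delay: total ns this thread spent runnable on a runqueue */
static void toy_update_stolen_time(struct toy_steal *st, uint64_t run_delay)
{
	st->steal += run_delay - st->last_steal;
	st->last_steal = run_delay;
}

int main(void)
{
	struct toy_steal st = { 0 };

	toy_update_stolen_time(&st, 1000);	/* +1000 ns stolen */
	toy_update_stolen_time(&st, 1500);	/* +500 ns more    */
	printf("steal = %llu ns\n", (unsigned long long)st.steal);
	return 0;
}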
200 * kvm_check_requests - check and handle pending vCPU requests
205 static int kvm_check_requests(struct kvm_vcpu *vcpu) in kvm_check_requests() argument
207 if (!kvm_request_pending(vcpu)) in kvm_check_requests()
210 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvm_check_requests()
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
213 if (kvm_dirty_ring_check_request(vcpu)) in kvm_check_requests()
216 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in kvm_check_requests()
217 kvm_update_stolen_time(vcpu); in kvm_check_requests()
222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu) in kvm_late_check_requests() argument
225 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu)) in kvm_late_check_requests()
226 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
228 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
233 * Check and handle pending signals and vCPU requests, etc.
241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) in kvm_enter_guest_check() argument
248 ret = xfer_to_guest_mode_handle_work(vcpu); in kvm_enter_guest_check()
252 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_enter_guest_check()
253 ret = kvm_check_requests(vcpu); in kvm_enter_guest_check()
254 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_enter_guest_check()
265 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) in kvm_pre_enter_guest() argument
270 ret = kvm_enter_guest_check(vcpu); in kvm_pre_enter_guest()
275 * Handle vCPU timer and interrupts, check pending requests and in kvm_pre_enter_guest()
276 * check the vmid before the vCPU enters the guest in kvm_pre_enter_guest()
279 kvm_deliver_intr(vcpu); in kvm_pre_enter_guest()
280 kvm_deliver_exception(vcpu); in kvm_pre_enter_guest()
281 /* Make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
282 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
283 kvm_check_vpid(vcpu); in kvm_pre_enter_guest()
284 kvm_check_pmu(vcpu); in kvm_pre_enter_guest()
291 kvm_late_check_requests(vcpu); in kvm_pre_enter_guest()
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { in kvm_pre_enter_guest()
297 kvm_lose_pmu(vcpu); in kvm_pre_enter_guest()
298 /* make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
299 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
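The smp_store_mb() calls above pair with kvm_arch_vcpu_should_kick() further down: guest entry publishes vcpu->mode = IN_GUEST_MODE before re-checking for pending requests, while a kicker posts its request before reading the mode, so at least one side always observes the other. A user-space analogue of this Dekker-style handshake with C11 seq_cst atomics (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

enum toy_mode { OUTSIDE_GUEST, IN_GUEST };

static _Atomic int toy_mode = OUTSIDE_GUEST;
static atomic_bool toy_request = false;

/* Entry path: publish the mode, then re-check for late requests. */
static bool toy_enter_guest(void)
{
	atomic_store(&toy_mode, IN_GUEST);		/* seq_cst: full barrier */
	if (atomic_load(&toy_request)) {
		atomic_store(&toy_mode, OUTSIDE_GUEST);	/* bail out and retry */
		return false;
	}
	return true;	/* really enter the guest here */
}

/* Kicking path: post the request, then kick only if already in guest. */
static bool toy_post_request(void)
{
	atomic_store(&toy_request, true);		/* seq_cst: full barrier */
	return atomic_load(&toy_mode) == IN_GUEST;	/* true => send the IPI */
}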
311 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvm_handle_exit() argument
314 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
318 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
323 kvm_lose_pmu(vcpu); in kvm_handle_exit()
329 trace_kvm_exit(vcpu, ecode); in kvm_handle_exit()
331 ret = kvm_handle_fault(vcpu, ecode); in kvm_handle_exit()
334 ++vcpu->stat.int_exits; in kvm_handle_exit()
338 ret = kvm_pre_enter_guest(vcpu); in kvm_handle_exit()
347 trace_kvm_reenter(vcpu); in kvm_handle_exit()
352 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
354 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
355 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
358 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
360 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
363 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
375 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_get_ip() argument
377 return vcpu->arch.pc; in kvm_arch_vcpu_get_ip()
383 * any event that arrives while a vCPU is loaded is considered to be "in guest".
385 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) in kvm_arch_pmi_in_guest() argument
387 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); in kvm_arch_pmi_in_guest()
391 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_preempted_in_kernel() argument
396 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
401 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
407 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
413 ret = kvm_pending_timer(vcpu) || in kvm_cpu_has_pending_timer()
420 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_dump_regs() argument
424 kvm_debug("vCPU Register Dump:\n"); in kvm_arch_vcpu_dump_regs()
425 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
426 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
430 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
431 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
443 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
446 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
451 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
458 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
467 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
474 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
476 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
481 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) in kvm_set_cpuid() argument
485 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
490 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
493 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
497 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
505 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
511 if (vcpu == map->phys_map[val].vcpu) { in kvm_set_cpuid()
512 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
517 * New CPUID is already claimed by another vCPU in kvm_set_cpuid()
520 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
526 map->phys_map[val].vcpu = vcpu; in kvm_set_cpuid()
527 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
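kvm_set_cpuid() above is a claim-a-slot-under-lock idiom: under phyid_map_lock, re-setting the same value on the same vCPU succeeds idempotently, a value held by another vCPU is rejected, and a free slot is claimed. A sketch of the idiom with a pthread mutex, hypothetical toy_* names, and a fixed-size map:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

#define TOY_MAX_PHYID 256

struct toy_vcpu;	/* opaque; only pointer identity matters here */

static struct {
	pthread_mutex_t lock;
	struct toy_vcpu *slot[TOY_MAX_PHYID];
} toy_map = { .lock = PTHREAD_MUTEX_INITIALIZER };

static int toy_set_cpuid(struct toy_vcpu *v, unsigned long val)
{
	int ret = 0;

	if (val >= TOY_MAX_PHYID)
		return -EINVAL;

	pthread_mutex_lock(&toy_map.lock);
	if (toy_map.slot[val] == v)
		ret = 0;		/* already ours: idempotent success */
	else if (toy_map.slot[val])
		ret = -EINVAL;		/* claimed by another vCPU */
	else
		toy_map.slot[val] = v;	/* free: claim the slot */
	pthread_mutex_unlock(&toy_map.lock);

	return ret;
}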
532 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) in kvm_drop_cpuid() argument
536 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
538 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
544 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
546 map->phys_map[cpuid].vcpu = NULL; in kvm_drop_cpuid()
550 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
564 return map->phys_map[cpuid].vcpu; in kvm_get_vcpu_by_cpuid()
567 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) in _kvm_getcsr() argument
570 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
577 vcpu_load(vcpu); in _kvm_getcsr()
582 kvm_deliver_intr(vcpu); in _kvm_getcsr()
583 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
584 vcpu_put(vcpu); in _kvm_getcsr()
602 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) in _kvm_setcsr() argument
605 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
611 return kvm_set_cpuid(vcpu, val); in _kvm_setcsr()
627 * After modifying the PMU CSR register values of the vCPU, in _kvm_setcsr()
639 kvm_make_request(KVM_REQ_PMU, vcpu); in _kvm_setcsr()
756 static int kvm_get_one_reg(struct kvm_vcpu *vcpu, in kvm_get_one_reg() argument
765 ret = _kvm_getcsr(vcpu, id, v); in kvm_get_one_reg()
770 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
775 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
780 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
783 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
786 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
789 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
792 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
795 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
805 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
823 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_get_reg() argument
830 ret = kvm_get_one_reg(vcpu, reg, &v); in kvm_get_reg()
843 static int kvm_set_one_reg(struct kvm_vcpu *vcpu, in kvm_set_one_reg() argument
852 ret = _kvm_setcsr(vcpu, id, v); in kvm_set_one_reg()
859 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
861 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
862 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
865 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
870 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
873 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
876 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
879 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
882 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
885 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
896 * gftoffset is relative to the board, not the vCPU in kvm_set_one_reg()
899 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
900 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
903 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
904 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
905 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
908 * On vCPU reset, clear the ESTAT and GINTC registers in kvm_set_one_reg()
911 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); in kvm_set_one_reg()
912 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); in kvm_set_one_reg()
927 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_set_reg() argument
942 return kvm_set_one_reg(vcpu, reg, v); in kvm_set_reg()
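kvm_get_reg()/kvm_set_reg() sit behind the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, where struct kvm_one_reg carries a register ID plus a user-space pointer to the value. A plausible user-space read through that interface (error handling elided; the LoongArch register ID constants live in the uapi headers):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one 64-bit guest register through the ONE_REG interface. */
static uint64_t read_guest_reg(int vcpu_fd, uint64_t reg_id)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = reg_id,
		.addr = (uintptr_t)&val,	/* kernel copies the value here */
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* returns 0 on success */
	return val;
}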
945 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_get_sregs() argument
950 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_set_sregs() argument
955 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
959 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
960 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
962 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
967 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
971 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
972 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
974 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
975 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
980 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
987 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_has_attr() argument
1003 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_has_attr() argument
1006 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_has_attr()
1013 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_has_attr() argument
1020 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); in kvm_loongarch_vcpu_has_attr()
1023 ret = kvm_loongarch_pvtime_has_attr(vcpu, attr); in kvm_loongarch_vcpu_has_attr()
1032 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_get_attr() argument
1046 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1057 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_get_attr() argument
1063 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_get_attr()
1067 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1074 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_get_attr() argument
1081 ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr); in kvm_loongarch_vcpu_get_attr()
1084 ret = kvm_loongarch_pvtime_get_attr(vcpu, attr); in kvm_loongarch_vcpu_get_attr()
1093 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_set_attr() argument
1098 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_cpucfg_set_attr()
1120 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_pvtime_set_attr() argument
1125 struct kvm *kvm = vcpu->kvm; in kvm_loongarch_pvtime_set_attr()
1127 if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) in kvm_loongarch_pvtime_set_attr()
1138 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1149 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1150 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
1151 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_loongarch_pvtime_set_attr()
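The pvtime attribute handlers above are reached through the vCPU device-attr ioctls shown later in kvm_arch_vcpu_ioctl(). Assuming the LoongArch uapi constant names KVM_LOONGARCH_VCPU_PVTIME_CTRL and KVM_LOONGARCH_VCPU_PVTIME_GPA (verify against the headers), user space would register a steal-time block roughly like this:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Point the kernel at a guest-physical steal-time block for this vCPU. */
static int set_steal_time_gpa(int vcpu_fd, uint64_t gpa)
{
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,	/* assumed uapi name */
		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,		/* assumed uapi name */
		.addr  = (uintptr_t)&gpa,
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* steal time not supported for this vCPU */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}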
1157 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_set_attr() argument
1164 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); in kvm_loongarch_vcpu_set_attr()
1167 ret = kvm_loongarch_pvtime_set_attr(vcpu, attr); in kvm_loongarch_vcpu_set_attr()
1182 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
1188 * should be used. Since the CSR registers are owned by this vCPU, if switching in kvm_arch_vcpu_ioctl()
1192 * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1205 r = kvm_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1206 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1208 r = kvm_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1217 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
1224 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1231 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1238 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
1249 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
1253 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1254 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1256 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1261 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
1265 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1266 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1268 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1274 int kvm_own_lbt(struct kvm_vcpu *vcpu) in kvm_own_lbt() argument
1276 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1281 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1282 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1288 static void kvm_lose_lbt(struct kvm_vcpu *vcpu) in kvm_lose_lbt() argument
1291 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1292 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1294 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1299 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) in kvm_check_fcsr() argument
1306 kvm_own_lbt(vcpu); in kvm_check_fcsr()
1309 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) in kvm_check_fcsr_alive() argument
1311 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1312 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1314 kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0)); in kvm_check_fcsr_alive()
1318 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { } in kvm_lose_lbt() argument
1319 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { } in kvm_check_fcsr() argument
1320 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } in kvm_check_fcsr_alive() argument
1324 void kvm_own_fpu(struct kvm_vcpu *vcpu) in kvm_own_fpu() argument
1332 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1335 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1336 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1337 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
1344 int kvm_own_lsx(struct kvm_vcpu *vcpu) in kvm_own_lsx() argument
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1352 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1354 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1360 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1366 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1370 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); in kvm_own_lsx()
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1380 int kvm_own_lasx(struct kvm_vcpu *vcpu) in kvm_own_lasx() argument
1382 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
1387 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1389 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1393 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1397 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1398 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1402 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1406 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); in kvm_own_lasx()
1407 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
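kvm_own_lsx()/kvm_own_lasx() above restore only the slice of vector state that is not already live: with the scalar FPU loaded, only the 128-bit or 256-bit upper halves need restoring. A sketch of that hierarchical decision with hypothetical toy_* names:

#include <stdio.h>

#define OWN_FPU		(1u << 0)
#define OWN_LSX		(1u << 1)
#define OWN_LASX	(1u << 2)

/* Restore only the part of the 256-bit vector state not already live. */
static void toy_own_lasx(unsigned int *aux_inuse)
{
	switch (*aux_inuse & (OWN_FPU | OWN_LSX)) {
	case OWN_FPU | OWN_LSX:
		printf("restore LASX upper halves only\n");
		break;
	case OWN_FPU:
		printf("restore LSX and LASX upper halves\n");
		break;
	default:
		printf("restore the full LASX register file\n");
		break;
	}
	*aux_inuse |= OWN_LASX | OWN_LSX | OWN_FPU;
}

int main(void)
{
	unsigned int aux = OWN_FPU;

	toy_own_lasx(&aux);	/* prints the partial-restore case */
	return 0;
}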
1415 void kvm_lose_fpu(struct kvm_vcpu *vcpu) in kvm_lose_fpu() argument
1419 kvm_check_fcsr_alive(vcpu); in kvm_lose_fpu()
1420 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1421 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1422 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1423 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); in kvm_lose_fpu()
1427 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1428 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1429 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1430 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); in kvm_lose_fpu()
1434 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1435 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1436 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1437 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); in kvm_lose_fpu()
1442 kvm_lose_lbt(vcpu); in kvm_lose_fpu()
1447 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1452 kvm_queue_irq(vcpu, intr); in kvm_vcpu_ioctl_interrupt()
1454 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
1460 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
1469 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
1477 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
1479 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
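kvm_vcpu_ioctl_interrupt() above encodes queue versus dequeue in the sign of the interrupt number: a positive value queues the interrupt, a negative one dequeues it. A plausible user-space call through the async KVM_INTERRUPT ioctl:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Queue (irq > 0) or dequeue (irq < 0) a guest interrupt line. */
static int toy_set_irq(int vcpu_fd, int irq)
{
	struct kvm_interrupt intr = { .irq = (__u32)irq };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
}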
1490 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
1495 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1496 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1498 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
1502 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); in kvm_arch_vcpu_create()
1508 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; in kvm_arch_vcpu_create()
1510 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1511 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1512 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1513 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1520 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1523 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1526 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1532 kvm_init_timer(vcpu, timer_hz); in kvm_arch_vcpu_create()
1535 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1539 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
1548 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
1552 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1557 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1558 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1559 kvm_drop_cpuid(vcpu); in kvm_arch_vcpu_destroy()
1560 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1563 * If the vCPU is freed and reused as another vCPU, we don't want the in kvm_arch_vcpu_destroy()
1567 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1568 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
1573 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_load() argument
1577 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1583 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1586 * Was this the last vCPU to run on this CPU? in _kvm_vcpu_load()
1587 * If not, any old guest state from this vCPU will have been clobbered. in _kvm_vcpu_load()
1589 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1590 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
1591 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1592 context->last_vcpu = vcpu; in _kvm_vcpu_load()
1595 kvm_restore_timer(vcpu); in _kvm_vcpu_load()
1596 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in _kvm_vcpu_load()
1599 kvm_restore_pmu(vcpu); in _kvm_vcpu_load()
1602 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1605 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1658 * prevents an SC on the next vCPU from succeeding by matching an LL on in _kvm_vcpu_load()
1659 * the previous vCPU. in _kvm_vcpu_load()
1661 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1664 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1669 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
1675 _kvm_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
1679 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_put() argument
1681 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1683 kvm_lose_fpu(vcpu); in _kvm_vcpu_put()
1691 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1742 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1745 kvm_save_timer(vcpu); in _kvm_vcpu_put()
1752 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
1759 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1762 _kvm_vcpu_put(vcpu, cpu); in kvm_arch_vcpu_put()
1766 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1769 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1771 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1772 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1773 kvm_complete_mmio_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1774 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1779 kvm_complete_user_service(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1783 kvm_complete_iocsr_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1787 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1793 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1794 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1795 r = kvm_pre_enter_guest(vcpu); in kvm_arch_vcpu_ioctl_run()
1801 trace_kvm_enter(vcpu); in kvm_arch_vcpu_ioctl_run()
1802 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()
1804 trace_kvm_out(vcpu); in kvm_arch_vcpu_ioctl_run()
1811 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1812 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()