Lines Matching full:vcpu

18 STATS_DESC_COUNTER(VCPU, int_exits),
19 STATS_DESC_COUNTER(VCPU, idle_exits),
20 STATS_DESC_COUNTER(VCPU, cpucfg_exits),
21 STATS_DESC_COUNTER(VCPU, signal_exits),
34 * kvm_check_requests - check and handle pending vCPU requests
39 static int kvm_check_requests(struct kvm_vcpu *vcpu) in kvm_check_requests() argument
41 if (!kvm_request_pending(vcpu)) in kvm_check_requests()
44 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvm_check_requests()
45 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
47 if (kvm_dirty_ring_check_request(vcpu)) in kvm_check_requests()
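The listing shows only the matching lines of kvm_check_requests(). For readability, here is a hedged reconstruction of the whole helper; the RESUME_GUEST/RESUME_HOST return paths are assumed from the standard KVM resume convention, since the return statements themselves do not match the query:

static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
        if (!kvm_request_pending(vcpu))
                return RESUME_GUEST;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                vcpu->arch.vpid = 0;    /* Drop vpid for this vCPU */

        /* A dirty-ring-full condition must be handled in userspace */
        if (kvm_dirty_ring_check_request(vcpu))
                return RESUME_HOST;

        return RESUME_GUEST;
}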
54 * Check and handle pending signals, vCPU requests, etc.
62 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) in kvm_enter_guest_check() argument
69 ret = xfer_to_guest_mode_handle_work(vcpu); in kvm_enter_guest_check()
73 ret = kvm_check_requests(vcpu); in kvm_enter_guest_check()
84 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) in kvm_pre_enter_guest() argument
89 ret = kvm_enter_guest_check(vcpu); in kvm_pre_enter_guest()
94 * Handle the vCPU timer and interrupts, check pending requests, and in kvm_pre_enter_guest()
95 * check the vmid before the vCPU enters the guest in kvm_pre_enter_guest()
98 kvm_deliver_intr(vcpu); in kvm_pre_enter_guest()
99 kvm_deliver_exception(vcpu); in kvm_pre_enter_guest()
100 /* Make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
101 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_pre_enter_guest()
102 kvm_check_vpid(vcpu); in kvm_pre_enter_guest()
103 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
105 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
107 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { in kvm_pre_enter_guest()
108 /* Make sure the vcpu mode has been written */ in kvm_pre_enter_guest()
109 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); in kvm_pre_enter_guest()
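The fragment above is the classic lock-free handshake with kvm_vcpu_kick(): publish IN_GUEST_MODE with a full barrier (smp_store_mb()), then re-check for pending work before really entering the guest, so a request raised concurrently is never missed. A minimal userspace sketch of the same pattern, using C11 atomics and invented names (mode, request_pending):

#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic int mode = OUTSIDE_GUEST_MODE;
static atomic_bool request_pending;

/* Entry side, cf. kvm_pre_enter_guest(): store the mode, then re-check. */
static bool try_enter_guest(void)
{
        /* seq_cst store + load gives the smp_store_mb() ordering */
        atomic_store(&mode, IN_GUEST_MODE);
        if (atomic_load(&request_pending)) {
                atomic_store(&mode, OUTSIDE_GUEST_MODE);
                return false;   /* bail out and service the request */
        }
        return true;            /* safe to run the guest */
}

/* Requester side, cf. kvm_make_request() + kvm_arch_vcpu_should_kick(). */
static void make_request(void)
{
        atomic_store(&request_pending, true);
        if (atomic_load(&mode) == IN_GUEST_MODE)
                ;       /* an IPI would be sent here to force a guest exit */
}

With sequentially consistent ordering on both sides, at least one of the two loads observes the other thread's store, so the vCPU either sees the request before entry or is kicked out of the guest.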
121 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvm_handle_exit() argument
124 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
128 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_handle_exit()
137 trace_kvm_exit(vcpu, ecode); in kvm_handle_exit()
139 ret = kvm_handle_fault(vcpu, ecode); in kvm_handle_exit()
142 ++vcpu->stat.int_exits; in kvm_handle_exit()
146 ret = kvm_pre_enter_guest(vcpu); in kvm_handle_exit()
155 trace_kvm_reenter(vcpu); in kvm_handle_exit()
160 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
162 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
163 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
166 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
168 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
171 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
176 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
181 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
187 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
193 ret = kvm_pending_timer(vcpu) || in kvm_cpu_has_pending_timer()
200 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_dump_regs() argument
204 kvm_debug("vCPU Register Dump:\n"); in kvm_arch_vcpu_dump_regs()
205 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
206 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
210 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
211 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
223 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
226 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
231 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
238 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
247 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
253 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) in _kvm_getcsr() argument
256 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
277 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) in _kvm_setcsr() argument
280 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
370 static int kvm_get_one_reg(struct kvm_vcpu *vcpu, in kvm_get_one_reg() argument
379 ret = _kvm_getcsr(vcpu, id, v); in kvm_get_one_reg()
384 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
391 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
406 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_get_reg() argument
413 ret = kvm_get_one_reg(vcpu, reg, &v); in kvm_get_reg()
426 static int kvm_set_one_reg(struct kvm_vcpu *vcpu, in kvm_set_one_reg() argument
435 ret = _kvm_setcsr(vcpu, id, v); in kvm_set_one_reg()
442 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
448 * The counter (time) offset is per-board (VM-wide), not per-vCPU in kvm_set_one_reg()
451 if (vcpu->vcpu_id == 0) in kvm_set_one_reg()
452 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
455 kvm_reset_timer(vcpu); in kvm_set_one_reg()
456 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
457 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
472 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_set_reg() argument
487 return kvm_set_one_reg(vcpu, reg, v); in kvm_set_reg()
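On the userspace side these handlers are reached through the generic ONE_REG ioctls. A hedged sketch follows; the register-ID encodings (KVM_REG_LOONGARCH_* in the UAPI <asm/kvm.h>) are left to the caller rather than spelled out here:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = id,
                .addr = (uint64_t)(unsigned long)val,   /* kernel writes the value here */
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
        struct kvm_one_reg reg = {
                .id   = id,
                .addr = (uint64_t)(unsigned long)&val,  /* kernel reads the value from here */
        };

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}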
490 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_get_sregs() argument
495 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_arch_vcpu_ioctl_set_sregs() argument
500 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
504 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
505 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
507 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
512 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
516 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
517 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
519 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
520 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
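The whole-register-file counterpart is KVM_GET_REGS/KVM_SET_REGS. As the code above enforces, writes to gpr[0] are discarded because $zero is hard-wired. A minimal usage sketch (vcpu_fd and entry_point are placeholders):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_boot_pc(int vcpu_fd, uint64_t entry_point)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                return -1;
        regs.pc = entry_point;  /* gpr[0] stays zero no matter what we write */
        return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}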
525 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
532 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_has_attr() argument
545 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_has_attr() argument
552 ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); in kvm_loongarch_vcpu_has_attr()
561 static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_get_cpucfg_attr() argument
577 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_get_attr() argument
584 ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); in kvm_loongarch_vcpu_get_attr()
593 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_cpucfg_set_attr() argument
599 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, in kvm_loongarch_vcpu_set_attr() argument
606 ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); in kvm_loongarch_vcpu_set_attr()
621 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
627 * should be used. Since the CSR registers are owned by this vCPU, if we switch in kvm_arch_vcpu_ioctl()
631 * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
644 r = kvm_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
645 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
647 r = kvm_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
656 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
663 r = kvm_loongarch_vcpu_has_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
670 r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
677 r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); in kvm_arch_vcpu_ioctl()
688 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
692 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
693 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
695 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
700 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
704 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
705 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
707 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
713 void kvm_own_fpu(struct kvm_vcpu *vcpu) in kvm_own_fpu() argument
720 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
721 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
722 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); in kvm_own_fpu()
729 int kvm_own_lsx(struct kvm_vcpu *vcpu) in kvm_own_lsx() argument
731 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
738 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
744 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
750 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
754 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); in kvm_own_lsx()
755 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
764 int kvm_own_lasx(struct kvm_vcpu *vcpu) in kvm_own_lasx() argument
766 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
772 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
776 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
780 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
781 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
785 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
789 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); in kvm_own_lasx()
790 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
798 void kvm_lose_fpu(struct kvm_vcpu *vcpu) in kvm_lose_fpu() argument
802 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
803 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
804 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
805 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); in kvm_lose_fpu()
809 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
810 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
811 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
812 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); in kvm_lose_fpu()
816 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
817 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
818 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
819 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); in kvm_lose_fpu()
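kvm_lose_fpu() relies on the architectural nesting LASX ⊃ LSX ⊃ FPU: saving the widest live unit saves all narrower state, so every subset ownership bit can be cleared together. A standalone illustration of that flag logic, with invented F_* names:

#define F_FPU   (1U << 0)
#define F_LSX   (1U << 1)
#define F_LASX  (1U << 2)

/* Mirror of the three-way cascade above: widest unit first. */
static unsigned int lose_fpu_flags(unsigned int aux_inuse)
{
        if (aux_inuse & F_LASX)         /* 256-bit save covers LSX and FPU */
                aux_inuse &= ~(F_LASX | F_LSX | F_FPU);
        else if (aux_inuse & F_LSX)     /* 128-bit save covers FPU */
                aux_inuse &= ~(F_LSX | F_FPU);
        else if (aux_inuse & F_FPU)     /* scalar FPU save */
                aux_inuse &= ~F_FPU;

        return aux_inuse;
}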
828 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
833 kvm_queue_irq(vcpu, intr); in kvm_vcpu_ioctl_interrupt()
835 kvm_dequeue_irq(vcpu, -intr); in kvm_vcpu_ioctl_interrupt()
841 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
850 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
858 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); in kvm_arch_vcpu_async_ioctl()
860 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
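The sign convention visible in kvm_vcpu_ioctl_interrupt() (a positive number queues the interrupt, a negative one dequeues it) is driven from userspace through KVM_INTERRUPT. A hedged sketch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assert_irq(int vcpu_fd, int irq_line)
{
        struct kvm_interrupt irq = { .irq = irq_line };

        return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);     /* irq > 0: queue */
}

static int deassert_irq(int vcpu_fd, int irq_line)
{
        struct kvm_interrupt irq = { .irq = -irq_line };

        return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);     /* irq < 0: dequeue */
}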
871 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
876 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
878 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); in kvm_arch_vcpu_create()
879 vcpu->arch.swtimer.function = kvm_swtimer_wakeup; in kvm_arch_vcpu_create()
881 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
882 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
883 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
884 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
891 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
894 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
900 kvm_init_timer(vcpu, timer_hz); in kvm_arch_vcpu_create()
903 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
907 kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); in kvm_arch_vcpu_create()
915 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
919 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
924 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
925 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
926 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
929 * If the vCPU is freed and reused as another vCPU, we don't want the in kvm_arch_vcpu_destroy()
933 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
934 if (context->last_vcpu == vcpu) in kvm_arch_vcpu_destroy()
939 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_load() argument
943 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
949 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
952 * Was this the last vCPU to run on this CPU? in _kvm_vcpu_load()
953 * If not, any old guest state from this vCPU will have been clobbered. in _kvm_vcpu_load()
955 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
956 if (migrated || (context->last_vcpu != vcpu)) in _kvm_vcpu_load()
957 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
958 context->last_vcpu = vcpu; in _kvm_vcpu_load()
961 kvm_restore_timer(vcpu); in _kvm_vcpu_load()
967 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
970 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1023 * prevents an SC on the next vCPU from succeeding by matching an LL on in _kvm_vcpu_load()
1024 * the previous vCPU. in _kvm_vcpu_load()
1026 if (vcpu->kvm->created_vcpus > 1) in _kvm_vcpu_load()
1029 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1034 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
1040 _kvm_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
1044 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) in _kvm_vcpu_put() argument
1046 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1048 kvm_lose_fpu(vcpu); in _kvm_vcpu_put()
1056 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1107 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1110 kvm_save_timer(vcpu); in _kvm_vcpu_put()
1117 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
1124 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
1127 _kvm_vcpu_put(vcpu, cpu); in kvm_arch_vcpu_put()
1131 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1134 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1136 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1137 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1138 kvm_complete_mmio_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1139 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1144 kvm_complete_iocsr_read(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1153 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1154 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1155 r = kvm_pre_enter_guest(vcpu); in kvm_arch_vcpu_ioctl_run()
1161 trace_kvm_enter(vcpu); in kvm_arch_vcpu_ioctl_run()
1162 r = kvm_loongarch_ops->enter_guest(run, vcpu); in kvm_arch_vcpu_ioctl_run()
1164 trace_kvm_out(vcpu); in kvm_arch_vcpu_ioctl_run()
1171 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1172 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
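From userspace, the flow above pairs with the usual KVM_RUN loop: on KVM_EXIT_MMIO the VMM emulates the access and re-enters, and kvm_arch_vcpu_ioctl_run() then completes a pending read via kvm_complete_mmio_read() before the next entry. A hedged sketch, where handle_mmio() stands in for the VMM's device emulation and run points at the vCPU's mmap'ed kvm_run area:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

extern void handle_mmio(uint64_t addr, void *data, int len, int is_write);

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0)
                        return -1;

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        handle_mmio(run->mmio.phys_addr, run->mmio.data,
                                    run->mmio.len, run->mmio.is_write);
                        break;  /* re-enter; the kernel completes the read */
                default:
                        return 0;       /* let the caller handle other exits */
                }
        }
}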