Lines Matching +full:sync +full:- +full:update +full:- +full:mask

// SPDX-License-Identifier: GPL-2.0

#include <linux/entry-kvm.h>
/* kvm_riscv_reset_vcpu() */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	...
	loaded = (vcpu->cpu != -1);
	...
	vcpu->arch.last_exit_cpu = -1;
	...
	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
	...
	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
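The loaded flag hints at the surrounding reset protocol: a vCPU whose state currently lives in hardware CSRs (vcpu->cpu != -1) must be offloaded before its in-memory copy is rewritten, then reloaded afterwards. A minimal sketch of that pattern, assuming the usual put/load pairing (illustrative, not the verbatim elided code):

	bool loaded;

	/* Disable preemption so the vCPU cannot migrate mid-reset */
	preempt_disable();

	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);	/* sync HW CSRs back to memory */

	/* ... reset guest_csr/guest_context from the reset copies ... */

	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();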
/* kvm_arch_vcpu_create() */
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	...
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
	...
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();
	...
	spin_lock_init(&vcpu->arch.hfence_lock);
	...
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;
	...
		return -ENOMEM;
	...
	reset_csr->scounteren = 0x7;
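For reference, the reset context bits above determine the guest's very first entry state. An annotated restatement (comments added here, bit meanings per the RISC-V privileged and hypervisor specifications):

	cntx->sstatus = SR_SPP | SR_SPIE; /* previous privilege = S (SPP=1),
					   * interrupts on after sret (SPIE=1) */
	cntx->hstatus |= HSTATUS_VTW;	  /* trap guest WFI back to the host */
	cntx->hstatus |= HSTATUS_SPVP;	  /* previous VS privilege: supervisor */
	cntx->hstatus |= HSTATUS_SPV;	  /* sret returns with V=1 (guest mode) */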
/* kvm_arch_vcpu_postcreate() */
	/* Keep all vcpus with non-zero id in power-off state so that ... */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);

/* kvm_arch_vcpu_destroy() */
	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
/* kvm_arch_vcpu_runnable() */
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);

/* kvm_arch_vcpu_in_kernel() */
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
/* kvm_arch_vcpu_async_ioctl() */
	struct kvm_vcpu *vcpu = filp->private_data;
	...
			return -EFAULT;
	...
	return -ENOIOCTLCMD;
/* kvm_arch_vcpu_ioctl() */
	struct kvm_vcpu *vcpu = filp->private_data;
	...
	long r = -EINVAL;
	...
		r = -EFAULT;
	...
		r = -EFAULT;
	...
			r = -E2BIG;
	...
		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
/*
 * kvm_arch_vcpu_ioctl_get_sregs(), kvm_arch_vcpu_ioctl_set_sregs(),
 * kvm_arch_vcpu_ioctl_get_fpu(), kvm_arch_vcpu_ioctl_set_fpu(),
 * kvm_arch_vcpu_ioctl_translate(), kvm_arch_vcpu_ioctl_get_regs() and
 * kvm_arch_vcpu_ioctl_set_regs() are all unimplemented stubs:
 */
	return -EINVAL;
/* kvm_riscv_vcpu_flush_interrupts() */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
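irqs_pending and irqs_pending_mask form a lockless pair: writers record the new level of an interrupt line in irqs_pending and then publish the change by setting the same bit in irqs_pending_mask; the flush above claims all published bits at once with xchg_acquire() and applies only those to hvip. A self-contained sketch of the protocol, using plain C11 atomics in place of the kernel bitops (names here are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned long irqs_pending;	/* desired level per line */
	static _Atomic unsigned long irqs_pending_mask;	/* "these bits changed" */

	/* Producer: record the new level first, then publish the change. */
	static void set_irq(int irq)
	{
		atomic_fetch_or(&irqs_pending, 1UL << irq);
		atomic_fetch_or(&irqs_pending_mask, 1UL << irq);
	}

	/* Consumer: claim all published changes at once, apply them. */
	static void flush(unsigned long *hvip)
	{
		unsigned long mask, val;

		if (!atomic_load(&irqs_pending_mask))
			return;
		mask = atomic_exchange(&irqs_pending_mask, 0); /* ~xchg_acquire() */
		val = atomic_load(&irqs_pending) & mask;

		*hvip &= ~mask;	/* clear every changed bit ... */
		*hvip |= val;	/* ... then set those whose new level is 1 */
	}

	int main(void)
	{
		unsigned long hvip = 0;

		set_irq(2);		/* e.g. a VS-mode software interrupt */
		flush(&hvip);
		printf("hvip = %#lx\n", hvip);	/* prints 0x4 */
		return 0;
	}

The mask is what makes concurrent updates safe: the consumer never touches a level bit it was not told about, so a set that races with the flush is simply picked up on the next flush.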
/* kvm_riscv_vcpu_sync_interrupts() */
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	...
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync-up AIA high interrupts */
	...
	/* Sync-up timer CSRs */
	...
/* kvm_riscv_vcpu_set_interrupt() */
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the VS-mode interrupts as
	 * defined by the RISC-V privilege specification.
	 */
	...
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	...
	set_bit(irq, vcpu->arch.irqs_pending_mask);

/* kvm_riscv_vcpu_unset_interrupt() */
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the VS-mode interrupts as
	 * defined by the RISC-V privilege specification.
	 */
	...
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	...
	set_bit(irq, vcpu->arch.irqs_pending_mask);
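Note that both helpers end by setting the bit in irqs_pending_mask: the mask records which lines changed, while irqs_pending records the new level, matching the flush protocol above. A caller-side sketch (these timer call sites are illustrative, not verbatim):

	/* Timer expired: raise the VS-level timer interrupt line */
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);
	...
	/* Guest reprogrammed the comparator: lower the line again */
	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);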
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
	      << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
	      (unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
/* kvm_riscv_vcpu_power_off() */
	vcpu->arch.power_off = true;
	...

/* kvm_riscv_vcpu_power_on() */
	vcpu->arch.power_off = false;
	...

/* kvm_arch_vcpu_ioctl_get_mpstate() */
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

/* kvm_arch_vcpu_ioctl_set_mpstate() */
	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	...
	default:
		ret = -EINVAL;
	}

/* kvm_arch_vcpu_ioctl_set_guest_debug() */
	return -EINVAL;
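These handlers back the generic KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls, which is how a VMM brings a secondary vCPU (created powered-off by kvm_arch_vcpu_postcreate() above) online. A userspace sketch, error handling omitted:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static void power_on_vcpu(int vcpu_fd)
	{
		struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_RUNNABLE };

		/* Lands in kvm_arch_vcpu_ioctl_set_mpstate() above */
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}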
/* kvm_riscv_vcpu_setup_config() */
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
	...
		cfg->henvcfg |= ENVCFG_PBMTE;
	...
		cfg->henvcfg |= ENVCFG_STCE;
	...
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
	...
		cfg->henvcfg |= ENVCFG_CBZE;
	...
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
	...
		cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC | ...;
	...
		cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
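The elided guards above gate each bit on the guest's ISA bitmap. A plausible reconstruction of the henvcfg part, assuming the standard riscv_isa_extension_available() pattern (Svpbmt gates PBMTE, Sstc gates STCE, Zicbom gates CBIE/CBCFE, Zicboz gates CBZE):

	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;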
/* kvm_arch_vcpu_load() */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);
	csr_write(CSR_HENVCFG, cfg->henvcfg);
	if (IS_ENABLED(CONFIG_32BIT))
		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
	...
		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
	...
	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);
	...
	vcpu->cpu = cpu;
/* kvm_arch_vcpu_put() */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;
	...
	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
	...
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
/* kvm_riscv_check_vcpu_requests() */
	...
			(!vcpu->arch.power_off) && (!vcpu->arch.pause),
	...
	if (vcpu->arch.power_off || vcpu->arch.pause) {
	...
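The wait-condition fragment belongs to KVM_REQ_SLEEP handling: a powered-off or paused vCPU parks on its rcuwait until both flags clear, and re-queues the sleep request if it was woken early (e.g., by a signal). A sketch of the likely shape, with the elided SRCU lock juggling simplified away:

	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
		rcuwait_wait_event(wait,
			(!vcpu->arch.power_off) && (!vcpu->arch.pause),
			TASK_INTERRUPTIBLE);

		if (vcpu->arch.power_off || vcpu->arch.pause) {
			/* Woken for another reason: sleep again later */
			kvm_make_request(KVM_REQ_SLEEP, vcpu);
		}
	}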
/* kvm_riscv_update_hvip() */
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
/* kvm_riscv_vcpu_swap_in_guest_state() */
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
	if (... &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
						     smcsr->sstateen0);

/* kvm_riscv_vcpu_swap_in_host_state() */
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
	if (... &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
					    vcpu->arch.host_sstateen0);
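Both helpers rely on csr_swap(), which compiles to a single csrrw: it installs a new CSR value and returns the previous one, so guest entry and exit each exchange the host and guest copies of SENVCFG/SSTATEEN0 in one instruction per CSR. Schematically:

	/* entry: install guest value, remember host value */
	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);

	/* exit: reinstall host value, capture what the guest left behind */
	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);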
/* kvm_riscv_vcpu_enter_exit() */
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
/* kvm_arch_vcpu_ioctl_run() */
	struct kvm_run *run = vcpu->run;

	if (!vcpu->arch.ran_atleast_once)
		kvm_riscv_vcpu_setup_config(vcpu);

	/* Mark that this VCPU has run at least once */
	vcpu->arch.ran_atleast_once = true;
	...
	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	...
	}

	if (run->immediate_exit) {
		...
		return -EINTR;
	}
	...
	run->exit_reason = KVM_EXIT_UNKNOWN;
	...
	/* Update AIA HW state before entering guest */
	...
	/*
	 * ... see Documentation/virt/kvm/vcpu-requests.rst
	 */
	vcpu->mode = IN_GUEST_MODE;
	...
	/*
	 * VCPU interrupts may have been updated asynchronously,
	 * so update them in HW.
	 */
	kvm_riscv_vcpu_flush_interrupts(vcpu);

	/* Update HVIP CSR for current CPU */
	kvm_riscv_update_hvip(vcpu);

	if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
	    ...) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		...
	}
	...
	/*
	 * Note: This should be done after the G-stage VMID has been ...
	 */
	...
	vcpu->mode = OUTSIDE_GUEST_MODE;
	vcpu->stat.exits++;
	...
	trap.sepc = vcpu->arch.guest_context.sepc;
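Taken together, the mode transitions implement the standard KVM entry handshake described in Documentation/virt/kvm/vcpu-requests.rst: IN_GUEST_MODE must be visible before the final request/VMID check, so that a remote kvm_make_request() plus kick after that check raises an IPI that bounces the CPU straight back out of the guest. A condensed, illustrative sketch of the ordering inside the run loop (not the full code):

	local_irq_disable();
	vcpu->mode = IN_GUEST_MODE;

	kvm_riscv_vcpu_flush_interrupts(vcpu);	/* fold async IRQ updates... */
	kvm_riscv_update_hvip(vcpu);		/* ...into the HVIP CSR */

	if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
	    kvm_request_pending(vcpu)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;	/* abort this entry */
		local_irq_enable();
		continue;				/* retry the loop */
	}

	kvm_riscv_vcpu_enter_exit(vcpu);	/* world switch into the guest */

	vcpu->mode = OUTSIDE_GUEST_MODE;
	vcpu->stat.exits++;
	local_irq_enable();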