// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/insn-def.h>

static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_memory_slot *memslot;
	unsigned long hva, fault_addr;
	bool writable;
	gfn_t gfn;
	int ret;

	/*
	 * htval holds the guest physical address of the fault shifted
	 * right by 2; the lowest two bits are taken from stval.
	 */
	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
	gfn = fault_addr >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

	if (kvm_is_error_hva(hva) ||
	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
		/*
		 * Either the address is not backed by a memslot or this
		 * is a store to a read-only memslot; handle it as MMIO.
		 */
		switch (trap->scause) {
		case EXC_LOAD_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_load(vcpu, run,
							fault_addr,
							trap->htinst);
		case EXC_STORE_GUEST_PAGE_FAULT:
			return kvm_riscv_vcpu_mmio_store(vcpu, run,
							 fault_addr,
							 trap->htinst);
		default:
			return -EOPNOTSUPP;
		}
	}

	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
				   trap->scause == EXC_STORE_GUEST_PAGE_FAULT);
	if (ret < 0)
		return ret;

	return 1;
}

/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag indicating whether we are reading an instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	local_irq_save(flags);

	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 *
		 * Read the lower 16 bits of the instruction first; if the
		 * two lowest opcode bits indicate a 32-bit instruction,
		 * also read the upper 16 bits from the next halfword.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (guest_addr) : : "memory");

		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (guest_addr) : "memory");
	}

	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}

/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
	unsigned long vsstatus = csr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	csr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	csr_write(CSR_VSCAUSE, trap->scause);
	csr_write(CSR_VSTVAL, trap->stval);
	csr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);

	/* Set Guest privilege mode to supervisor */
	vcpu->arch.guest_context.sstatus |= SR_SPP;
}

static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
{
	int ret = -EFAULT;

	/* Only redirect traps that were taken while the Guest was running */
	if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
		kvm_riscv_vcpu_trap_redirect(vcpu, trap);
		ret = 1;
	}
	return ret;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap)
{
	int ret;

	/* If we got a host interrupt then do nothing */
	if (trap->scause & CAUSE_IRQ_FLAG)
		return 1;

	/* Handle guest traps */
	ret = -EFAULT;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	switch (trap->scause) {
	case EXC_INST_ILLEGAL:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
		vcpu->stat.instr_illegal_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_LOAD_MISALIGNED:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
		vcpu->stat.load_misaligned_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_STORE_MISALIGNED:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
		vcpu->stat.store_misaligned_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_LOAD_ACCESS:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
		vcpu->stat.load_access_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_STORE_ACCESS:
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
		vcpu->stat.store_access_exits++;
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_INST_ACCESS:
		ret = vcpu_redirect(vcpu, trap);
		break;
	case EXC_VIRTUAL_INST_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
		break;
	case EXC_INST_GUEST_PAGE_FAULT:
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = gstage_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
		break;
	case EXC_BREAKPOINT:
		run->exit_reason = KVM_EXIT_DEBUG;
		ret = 0;
		break;
	default:
		break;
	}

	/* Print details in case of error */
	if (ret < 0) {
		kvm_err("VCPU exit error %d\n", ret);
		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
			vcpu->arch.guest_context.sepc,
			vcpu->arch.guest_context.sstatus,
			vcpu->arch.guest_context.hstatus);
		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
			trap->scause, trap->stval, trap->htval, trap->htinst);
	}

	return ret;
}