// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
#include <asm/kvm_vcpu_sbi.h>

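/*
 * Per the SBI v0.2 calling convention, a guest requests a service with
 * an ECALL carrying the extension ID in a7 and the function ID in a6,
 * with arguments in a0-a5; the error code comes back in a0 and the
 * value in a1. The handlers below only look at a6 and a0-a5, since
 * dispatch on a7 (via extid_start/extid_end) happens in the common
 * SBI code.
 */

/*
 * Handle the SBI TIME extension (sbi_set_timer()). The 64-bit
 * next-event cycle value arrives in a0 on RV64 and is split across
 * a0 (low half) and a1 (high half) on RV32. Any function other than
 * SET_TIMER is reported as not supported.
 */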
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
#if __riscv_xlen == 32
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};

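/*
 * Handle the SBI IPI extension (sbi_send_ipi()). The target harts are
 * described in the SBI v0.2 hart-mask format: a0 is a bitmap of harts
 * relative to the base hartid in a1, and a1 == -1UL addresses all
 * harts. Each selected VCPU gets a VS-mode software interrupt.
 */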
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long hart_bit = 0, sentmask = 0;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		/* hbase == -1UL means the IPI targets all VCPUs. */
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			hart_bit = tmp->vcpu_id - hbase;
			if (hart_bit >= __riscv_xlen)
				goto done;
			if (!(hmask & (1UL << hart_bit)))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
		sentmask |= 1UL << hart_bit;
		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
	}

done:
	/* Any hmask bit that did not match a VCPU is an invalid parameter. */
	if (hbase != -1UL && (hmask ^ sentmask))
		retdata->err_val = SBI_ERR_INVALID_PARAM;

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};

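/*
 * Handle the SBI RFENCE extension on the harts selected by the a0/a1
 * hart mask: remote FENCE.I requests are executed directly, while
 * remote SFENCE.VMA[.ASID] requests become VMID-qualified HFENCE.VVMA
 * flushes; a zero-length or full address range is widened to a
 * flush-all. The HFENCE group of functions only makes sense with
 * nested virtualization and is reported as not supported.
 */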
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;
	unsigned long vmid;

	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask, vmid);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT, vmid);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask,
						       cp->a4, vmid);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm, hbase, hmask, cp->a2,
						       cp->a3, PAGE_SHIFT, cp->a4, vmid);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the SBI
		 * HFENCE calls must return not supported, hence fall
		 * through to the default case.
		 */
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};

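/*
 * Handle the SBI SRST (System Reset) extension. Shutdown and reboot
 * requests cannot be completed inside the kernel, so they are turned
 * into KVM_SYSTEM_EVENT_SHUTDOWN/KVM_SYSTEM_EVENT_RESET system events
 * and the VCPU exits to userspace, which performs the actual poweroff
 * or reset of the VM.
 */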
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;

	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN,
						reason);
			retdata->uexit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_RESET,
						reason);
			retdata->uexit = true;
			break;
		default:
			retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		}
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};

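/*
 * Handle the SBI DBCN (Debug Console) extension. Every console
 * function is forwarded to userspace as a KVM_EXIT_RISCV_SBI exit,
 * with the extension ID, function ID and arguments made available in
 * run->riscv_sbi; userspace implements the console and fills in the
 * SBI return values before resuming the VCPU.
 */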
static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_DBCN_CONSOLE_WRITE:
	case SBI_EXT_DBCN_CONSOLE_READ:
	case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
		/*
		 * The SBI debug console functions are unconditionally
		 * forwarded to userspace.
		 */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		retdata->uexit = true;
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
	.extid_start = SBI_EXT_DBCN,
	.extid_end = SBI_EXT_DBCN,
	/* Disabled until userspace explicitly enables the extension. */
	.default_disabled = true,
	.handler = kvm_sbi_ext_dbcn_handler,
};