// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte = data;
		datap = &tmp.byte;
		break;
	case 2:
		tmp.hword = data;
		datap = &tmp.hword;
		break;
	case 4:
		tmp.word = data;
		datap = &tmp.word;
		break;
	case 8:
		tmp.dword = data;
		datap = &tmp.dword;
		break;
	}

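	/*
	 * len is expected to be one of 1, 2, 4 or 8, as decoded from the
	 * fault syndrome by kvm_vcpu_dabt_get_as(), so one of the cases
	 * above has set datap by the time we reach the memcpy() below.
	 */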
	memcpy(buf, datap, len);
}

unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16 hword;
		u32 word;
		u64 dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}
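
/*
 * Illustrative round-trip (a sketch, not code from this file): the two
 * helpers above move values through a byte buffer in host-native
 * endianness, so

 *	u8 buf[8];
 *	unsigned long data;
 *
 *	kvm_mmio_write_buf(buf, 2, 0xbeef);
 *	data = kvm_mmio_read_buf(buf, 2);
 *
 * leaves data == 0xbeef regardless of host endianness.
 */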

static bool kvm_pending_external_abort(struct kvm_vcpu *vcpu)
{
	if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
		return false;

	if (vcpu_el1_is_32bit(vcpu)) {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA32_UND):
		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
			return true;
		default:
			return false;
		}
	} else {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR):
		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR):
			return true;
		default:
			return false;
		}
	}
}

/**
 * kvm_handle_mmio_return - Handle MMIO loads after user space emulation or
 *                          in-kernel IO emulation
 * @vcpu: The VCPU pointer
 *
 * Return: 1, telling the caller that the guest can be resumed.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/*
	 * Detect if the MMIO return was already handled or if userspace
	 * aborted the MMIO access.
	 */
	if (unlikely(!vcpu->mmio_needed || kvm_pending_external_abort(vcpu)))
		return 1;

	vcpu->mmio_needed = 0;

	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		struct kvm_run *run = vcpu->run;

		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);

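		/*
		 * Sign-extend a narrow load using the (x ^ mask) - mask
		 * trick, where mask has only the sign bit set. Worked
		 * example for len == 1, data == 0x80: mask == 0x80,
		 * (0x80 ^ 0x80) - 0x80 == -128, i.e. 0x80 sign-extended
		 * to unsigned long.
		 */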
		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

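		/*
		 * A 32-bit (SF == 0) load writes a W register, which
		 * zero-extends into the full 64-bit X register, so drop
		 * the upper 32 bits of the loaded value here.
		 */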
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_incr_pc(vcpu);

	return 1;
}
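
/*
 * Illustrative userspace counterpart (a sketch, not part of KVM): after a
 * KVM_EXIT_MMIO read, the VMM fills in the data and re-enters the vcpu,
 * which brings the guest back through kvm_handle_mmio_return() above:
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write) {
 *		memcpy(run->mmio.data, &emulated_value, run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 *
 * where emulated_value and vcpu_fd are stand-ins for the VMM's own state.
 */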

int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	struct kvm_run *run = vcpu->run;
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];
	u64 esr;

	esr = kvm_vcpu_get_esr(vcpu);

	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * volunteered to do so, and bail out otherwise.
	 *
	 * In the protected VM case, there isn't much userspace can do
	 * though, so directly deliver an exception to the guest.
	 */
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		trace_kvm_mmio_nisv(*vcpu_pc(vcpu), esr,
				    kvm_vcpu_get_hfar(vcpu), fault_ipa);

		if (vcpu_is_protected(vcpu))
			return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

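		/*
		 * Userspace volunteers by enabling the
		 * KVM_CAP_ARM_NISV_TO_USER capability, which sets this
		 * flag on the VM.
		 */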
		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			     &vcpu->kvm->arch.flags)) {
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		return -ENOSYS;
	}

	/*
	 * When (DFSC == 0b00xxxx || DFSC == 0b10101x) && DFSC != 0b0000xx,
	 * ESR_EL2[12:11] describe the Load/Store Type. This allows us to
	 * punt the LD64B/ST64B/ST64BV/ST64BV0 instructions to userspace,
	 * which will have to provide a full emulation of these 4
	 * instructions. No, we don't expect this to be fast.
	 *
	 * We rely on traps being set if the corresponding features are not
	 * enabled, so if we get here, userspace has promised to handle
	 * it already.
	 */
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case 0b000100 ... 0b001111:
	case 0b101010 ... 0b101011:
		if (FIELD_GET(GENMASK(12, 11), esr)) {
			run->exit_reason = KVM_EXIT_ARM_LDST64B;
			run->arm_nisv.esr_iss = esr & ~(u64)ESR_ELx_FSC;
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}
	}

	/*
	 * Prepare the MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then check whether some in-kernel emulation feels
	 * responsible, otherwise let user space do its magic.
	 */
	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	len = kvm_vcpu_dabt_get_as(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write = is_write;
	run->mmio.phys_addr = fault_ipa;
	run->mmio.len = len;
	vcpu->mmio_needed = 1;

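	/*
	 * kvm_io_bus_write()/kvm_io_bus_read() return 0 when an in-kernel
	 * device claimed the access; any other result means we must exit
	 * to userspace and have the access emulated there.
	 */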
	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu);
		return 1;
	}

	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;
	return 0;
}