// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

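/*
 * Masks of all feature bits currently defined for each pseudo-firmware
 * bitmap register; GENMASK(n - 1, 0) sets the n low-order bits.
 */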
#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT - 1, 0)

static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
	struct system_time_snapshot systime_snapshot;
	u64 cycles = ~0UL;
	u32 feature;

	/*
	 * The system time and the counter value must be captured at the
	 * same time to maintain consistency and precision.
	 */
	ktime_get_snapshot(&systime_snapshot);

	/*
	 * This is only valid if the current clocksource is the
	 * architected counter, as this is the only one the guest
	 * can see.
	 */
	if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
		return;

	/*
	 * The guest selects one of the two reference counters
	 * (virtual or physical) with the first argument of the SMCCC
	 * call. In case the identifier is not supported, error out.
	 */
	feature = smccc_get_arg1(vcpu);
	switch (feature) {
	case KVM_PTP_VIRT_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
		break;
	case KVM_PTP_PHYS_COUNTER:
		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
		break;
	default:
		return;
	}

	/*
	 * This relies on the top bit of val[0] never being set for
	 * valid values of system time, because that is *really* far
	 * in the future (about 292 years from 1970, and at that stage
	 * nobody will give a damn about it).
	 */
	val[0] = upper_32_bits(systime_snapshot.real);
	val[1] = lower_32_bits(systime_snapshot.real);
	val[2] = upper_32_bits(cycles);
	val[3] = lower_32_bits(cycles);
}
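
/*
 * Illustrative sketch, not part of this file: a guest reassembles the
 * reply from the four SMCCC return registers (an arm_smccc_res-style
 * res, with a0..a3 carrying val[0..3] above):
 *
 *	ktime = ((u64)res.a0 << 32) | lower_32_bits(res.a1);
 *	cycles = ((u64)res.a2 << 32) | lower_32_bits(res.a3);
 */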

static bool kvm_smccc_default_allowed(u32 func_id)
{
	switch (func_id) {
	/*
	 * List of function-ids that are not gated with the bitmapped
	 * feature firmware registers, and are to be allowed for
	 * servicing the call by default.
	 */
	case ARM_SMCCC_VERSION_FUNC_ID:
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		return true;
	default:
		/* PSCI 0.2 and up is in the 0:0x1f range */
		if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
		    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
			return true;

		/*
		 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
		 * its own function-id base and range
		 */
		if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
			return true;

		return false;
	}
}

static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
				&smccc_feat->std_bmap);
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				&smccc_feat->std_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
				&smccc_feat->vendor_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
				&smccc_feat->vendor_hyp_bmap);
	default:
		return false;
	}
}

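/*
 * The two ranges below cover the entire SMC32 and SMC64 Arm
 * Architecture Call space (owner 0, function numbers 0x0000-0xffff),
 * which KVM reserves for itself in the filter.
 */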
#define SMC32_ARCH_RANGE_BEGIN	ARM_SMCCC_VERSION_FUNC_ID
#define SMC32_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,	\
						   ARM_SMCCC_SMC_32,	\
						   0, ARM_SMCCC_FUNC_MASK)

#define SMC64_ARCH_RANGE_BEGIN	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,	\
						   ARM_SMCCC_SMC_64,	\
						   0, 0)
#define SMC64_ARCH_RANGE_END	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,	\
						   ARM_SMCCC_SMC_64,	\
						   0, ARM_SMCCC_FUNC_MASK)

static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
{
	int r;

	/*
	 * Prevent userspace from handling any SMCCC calls in the architecture
	 * range, avoiding the risk of misrepresenting Spectre mitigation status
	 * to the guest.
	 */
	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	r = mtree_insert_range(&kvm->arch.smccc_filter,
			       SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
			       xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
			       GFP_KERNEL_ACCOUNT);
	if (r)
		goto out_destroy;

	return 0;
out_destroy:
	mtree_destroy(&kvm->arch.smccc_filter);
	return r;
}
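
/*
 * The filter counts as "configured" once it holds any entry at all,
 * including the reserved architecture ranges inserted on the first
 * userspace update.
 */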
static bool kvm_smccc_filter_configured(struct kvm *kvm)
{
	return !mtree_empty(&kvm->arch.smccc_filter);
}

static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	struct kvm_smccc_filter filter;
	u32 start, end;
	int r;

	if (copy_from_user(&filter, uaddr, sizeof(filter)))
		return -EFAULT;

	if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
		return -EINVAL;

	start = filter.base;
	end = start + filter.nr_functions - 1;

	if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm)) {
		r = -EBUSY;
		goto out_unlock;
	}

	if (!kvm_smccc_filter_configured(kvm)) {
		r = kvm_smccc_filter_insert_reserved(kvm);
		if (WARN_ON_ONCE(r))
			goto out_unlock;
	}

	r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
			       xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return r;
}
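
/*
 * Illustrative userspace sketch, not part of this file: installing a
 * filter entry with KVM_SET_DEVICE_ATTR. The vm_fd, the attribute
 * group name and the 0x83000000 function ID are assumptions made for
 * the example.
 *
 *	struct kvm_smccc_filter filter = {
 *		.base		= 0x83000000,
 *		.nr_functions	= 1,
 *		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VM_SMCCC_CTRL,
 *		.attr	= KVM_ARM_VM_SMCCC_FILTER,
 *		.addr	= (__u64)&filter,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */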

static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
{
	unsigned long idx = func_id;
	void *val;

	if (!kvm_smccc_filter_configured(kvm))
		return KVM_SMCCC_FILTER_HANDLE;

	/*
	 * But where's the error handling, you say?
	 *
	 * mt_find() returns NULL if no entry was found, which just so happens
	 * to match KVM_SMCCC_FILTER_HANDLE.
	 */
	val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
	return xa_to_value(val);
}

static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
{
	/*
	 * Intervening actions in the SMCCC filter take precedence over the
	 * pseudo-firmware register bitmaps.
	 */
	u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
	if (action != KVM_SMCCC_FILTER_HANDLE)
		return action;

	if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
	    kvm_smccc_default_allowed(func_id))
		return KVM_SMCCC_FILTER_HANDLE;

	return KVM_SMCCC_FILTER_DENY;
}

static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
{
	u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
	struct kvm_run *run = vcpu->run;
	u64 flags = 0;

	if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
		flags |= KVM_HYPERCALL_EXIT_SMC;

	if (!kvm_vcpu_trap_il_is32bit(vcpu))
		flags |= KVM_HYPERCALL_EXIT_16BIT;

	run->exit_reason = KVM_EXIT_HYPERCALL;
	run->hypercall = (typeof(run->hypercall)) {
		.nr = func_id,
		.flags = flags,
	};
}
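
/*
 * Illustrative userspace sketch, not part of this file: when a call is
 * forwarded, the VMM observes something like
 *
 *	if (run->exit_reason == KVM_EXIT_HYPERCALL) {
 *		__u64 func = run->hypercall.nr;
 *		bool smc = run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC;
 *		// emulate or reject the call, then set the guest's
 *		// return registers before the next KVM_RUN
 *	}
 *
 * kvm_smccc_call_handler() below follows the usual exit-handler
 * convention: it returns 1 to resume the guest and 0 to exit to
 * userspace.
 */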

int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	u8 action;
	gpa_t gpa;

	action = kvm_smccc_get_action(vcpu, func_id);
	switch (action) {
	case KVM_SMCCC_FILTER_HANDLE:
		break;
	case KVM_SMCCC_FILTER_DENY:
		goto out;
	case KVM_SMCCC_FILTER_FWD_TO_USER:
		kvm_prepare_hypercall_exit(vcpu, func_id);
		return 0;
	default:
		WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
		goto out;
	}

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != INVALID_GPA)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		/* Function numbers 2-63 are reserved for pKVM for now */
		val[2] = smccc_feat->vendor_hyp_bmap_2;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}

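/*
 * Pseudo-firmware register IDs advertised to userspace; these are the
 * entries that kvm_arm_copy_fw_reg_indices() copies out for
 * KVM_GET_REG_LIST.
 */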
static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP_2,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

	smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
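	/*
	 * vendor_hyp_bmap_2 is left at its zeroed default: the features
	 * it gates stay disabled unless userspace sets bits in the
	 * KVM_REG_ARM_VENDOR_HYP_BMAP_2 register.
	 */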

	mt_init(&kvm->arch.smccc_filter);
}

void kvm_arm_teardown_hypercalls(struct kvm *kvm)
{
	mtree_destroy(&kvm->arch.smccc_filter);
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
			return -EFAULT;
	}

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where a
 * higher value means better protection (NOT_AVAIL < AVAIL < NOT_REQUIRED).
 */
static int get_kernel_wa_level(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As with hypercall discovery, we pretend not to
			 * have any FW mitigation if SSBS is present at
			 * all times.
			 */
			if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(vcpu, reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_STD_BMAP:
		val = READ_ONCE(smccc_feat->std_bmap);
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		val = READ_ONCE(smccc_feat->std_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap_2);
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
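
/*
 * Illustrative userspace sketch, not part of this file: reading one of
 * these registers with KVM_GET_ONE_REG, assuming vcpu_fd comes from
 * KVM_CREATE_VCPU.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id	= KVM_REG_ARM_STD_BMAP,
 *		.addr	= (__u64)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */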

static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
	int ret = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
	unsigned long *fw_reg_bmap, fw_reg_features;

	switch (reg_id) {
	case KVM_REG_ARM_STD_BMAP:
		fw_reg_bmap = &smccc_feat->std_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->std_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap_2;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2;
		break;
	default:
		return -ENOENT;
	}

	/* Check for unsupported bits */
	if (val & ~fw_reg_features)
		return -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
		ret = -EBUSY;
		goto out;
	}

	WRITE_ONCE(*fw_reg_bmap, val);
out:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (KVM_REG_SIZE(reg->id) != sizeof(val))
		return -ENOENT;
	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
		case KVM_ARM_PSCI_1_2:
		case KVM_ARM_PSCI_1_3:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(vcpu, reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(vcpu, reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}

int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return 0;
	default:
		return -ENXIO;
	}
}

int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	void __user *uaddr = (void __user *)attr->addr;

	switch (attr->attr) {
	case KVM_ARM_VM_SMCCC_FILTER:
		return kvm_smccc_set_filter(kvm, uaddr);
	default:
		return -ENXIO;
	}
}