// SPDX-License-Identifier: GPL-2.0-only

/* hypercalls: Check the ARM64's pseudo-firmware bitmap register interface.
 *
 * The test validates the basic hypercall functionalities that are exposed
 * via the pseudo-firmware bitmap registers. This includes the registers'
 * read/write behavior before and after the VM has started, and whether the
 * hypercalls are properly masked or unmasked to the guest when disabled or
 * enabled from KVM userspace, respectively.
 */
#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
#include <kvm_util.h>

#include "processor.h"

#define FW_REG_ULIMIT_VAL(max_feat_bit)	(GENMASK(max_feat_bit, 0))

/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX		0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX	0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX	1
#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_MAX	1

#define KVM_REG_ARM_STD_BMAP_RESET_VAL		FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_BMAP_BIT_MAX)
#define KVM_REG_ARM_STD_HYP_BMAP_RESET_VAL	FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX)
#define KVM_REG_ARM_VENDOR_HYP_BMAP_RESET_VAL	FW_REG_ULIMIT_VAL(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX)
#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL	0

struct kvm_fw_reg_info {
	uint64_t reg;		/* Register definition */
	uint64_t max_feat_bit;	/* Bit that represents the upper limit of the feature-map */
	uint64_t reset_val;	/* Reset value for the register */
};

#define FW_REG_INFO(r)				\
	{					\
		.reg = r,			\
		.max_feat_bit = r##_BIT_MAX,	\
		.reset_val = r##_RESET_VAL	\
	}

static const struct kvm_fw_reg_info fw_reg_info[] = {
	FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
	FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP_2),
};

enum test_stage {
	TEST_STAGE_REG_IFACE,
	TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
	TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
	TEST_STAGE_HVC_IFACE_FALSE_INFO,
	TEST_STAGE_END,
};

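/* Current test stage; updated on the host and pushed to the guest via sync_global_to_guest() */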
static int stage = TEST_STAGE_REG_IFACE;

struct test_hvc_info {
	uint32_t func_id;
	uint64_t arg1;
};

#define TEST_HVC_INFO(f, a1)	\
	{			\
		.func_id = f,	\
		.arg1 = a1,	\
	}

static const struct test_hvc_info hvc_info[] = {
	/* KVM_REG_ARM_STD_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),

	/* KVM_REG_ARM_STD_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),

	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
		      ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};

/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
	/* Feature support check against a different family of hypercalls */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};

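/*
 * Issue every hypercall in the given table and check the SMCCC return
 * value against the expectation for the current stage: calls must return
 * SMCCC_RET_NOT_SUPPORTED while their firmware register bit is cleared
 * (or when fed mismatched feature queries), and anything other than
 * SMCCC_RET_NOT_SUPPORTED once the bit is set.
 */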
static void guest_test_hvc(const struct test_hvc_info *hc_info)
{
	unsigned int i;
	struct arm_smccc_res res;
	unsigned int hvc_info_arr_sz;

	hvc_info_arr_sz =
		hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);

	for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
		memset(&res, 0, sizeof(res));
		smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);

		switch (stage) {
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			__GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
				       "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u",
				       res.a0, hc_info->func_id, hc_info->arg1, stage);
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			__GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
				       "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u",
				       res.a0, hc_info->func_id, hc_info->arg1, stage);
			break;
		default:
			GUEST_FAIL("Unexpected stage = %u", stage);
		}
	}
}

static void guest_code(void)
{
	while (stage != TEST_STAGE_END) {
		switch (stage) {
		case TEST_STAGE_REG_IFACE:
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			guest_test_hvc(hvc_info);
			break;
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			guest_test_hvc(false_hvc_info);
			break;
		default:
			GUEST_FAIL("Unexpected stage = %u", stage);
		}

		GUEST_SYNC(stage);
	}

	GUEST_DONE();
}

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

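/* Size of the steal-time structure, rounded up to a 64-byte multiple */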
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
#define ST_GPA_BASE		(1 << 30)

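/*
 * Back the steal-time structure with guest memory at ST_GPA_BASE and
 * point KVM at it via the PVTIME vCPU device attribute, so that the
 * ARM_SMCCC_HV_PV_TIME_ST hypercall exercised by the guest has a
 * backing page to report.
 */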
static void steal_time_init(struct kvm_vcpu *vcpu)
{
	uint64_t st_ipa = (ulong)ST_GPA_BASE;
	unsigned int gpages;

	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
	vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);

	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
			     KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
}

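/*
 * Before the first KVM_RUN the firmware registers are fully writable:
 * verify their reset values, toggle the feature bitmap in both
 * directions, and confirm that setting an unsupported bit fails with
 * EINVAL.
 */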
static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
		uint64_t set_val;

		/* First 'read' should be the reset value for the reg */
		val = vcpu_get_reg(vcpu, reg_info->reg);
		TEST_ASSERT(val == reg_info->reset_val,
			"Unexpected reset value for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
			reg_info->reg, reg_info->reset_val, val);

		if (reg_info->reset_val)
			set_val = 0;
		else
			set_val = FW_REG_ULIMIT_VAL(reg_info->max_feat_bit);

		ret = __vcpu_set_reg(vcpu, reg_info->reg, set_val);
		TEST_ASSERT(ret == 0,
			"Failed to %s all the features of reg: 0x%lx; ret: %d",
			(set_val ? "set" : "clear"), reg_info->reg, errno);

		val = vcpu_get_reg(vcpu, reg_info->reg);
		TEST_ASSERT(val == set_val,
			"Expected all the features to be %s for reg: 0x%lx",
			(set_val ? "set" : "cleared"), reg_info->reg);

		/*
		 * If the reg has been set, clear it as test_fw_regs_after_vm_start()
		 * expects it to be cleared.
		 */
		if (set_val) {
			ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
			TEST_ASSERT(ret == 0,
				"Failed to clear all the features of reg: 0x%lx; ret: %d",
				reg_info->reg, errno);
		}

		/*
		 * Test enabling a feature that's not supported.
		 * Avoid this check if all the bits are occupied.
		 */
		if (reg_info->max_feat_bit < 63) {
			ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
				errno, reg_info->reg);
		}
	}
}

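/*
 * Once the vCPU has run, the firmware registers are effectively
 * read-only: any attempt to modify them must fail with EBUSY.
 */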
static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/*
		 * Before starting the VM, the test clears all the bits.
		 * Check if that's still the case.
		 */
		val = vcpu_get_reg(vcpu, reg_info->reg);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx",
			reg_info->reg);

		/*
		 * Since the VM has run at least once, KVM shouldn't allow modification of
		 * the registers and should return EBUSY. Set the registers and check for
		 * the expected errno.
		 */
		ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
		TEST_ASSERT(ret != 0 && errno == EBUSY,
			"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
			errno, reg_info->reg);
	}
}

static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
{
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);

	steal_time_init(*vcpu);

	return vm;
}

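/*
 * Move the guest on to the next stage, then do the host-side work for
 * the stage that just completed (register checks after the VM has run,
 * or recreating the VM so the features are enabled by default again).
 */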
static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
{
	int prev_stage = stage;

	pr_debug("Stage: %d\n", prev_stage);

	/* Sync the stage early, the VM might be freed below. */
	stage++;
	sync_global_to_guest(*vm, stage);

	switch (prev_stage) {
	case TEST_STAGE_REG_IFACE:
		test_fw_regs_after_vm_start(*vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		/* Start a new VM so that all the features are now enabled by default */
		kvm_vm_free(*vm);
		*vm = test_vm_create(vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
	case TEST_STAGE_HVC_IFACE_FALSE_INFO:
		break;
	default:
		TEST_FAIL("Unknown test stage: %d", prev_stage);
	}
}

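/*
 * Create the VM, check the firmware registers before the first KVM_RUN,
 * then drive the guest through the remaining stages via ucalls.
 */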
static void test_run(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	bool guest_done = false;

	vm = test_vm_create(&vcpu);

	test_fw_regs_before_vm_start(vcpu);

	while (!guest_done) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			test_guest_stage(&vm, &vcpu);
			break;
		case UCALL_DONE:
			guest_done = true;
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		default:
			TEST_FAIL("Unexpected guest exit");
		}
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_run();
	return 0;
}