// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for exiting into userspace on registered MSRs
 */
#include <sys/ioctl.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

#define MSR_NON_EXISTENT 0x474f4f00

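/*
 * With KVM_MSR_FILTER_DEFAULT_ALLOW, only MSRs covered by a range can be
 * denied.  Each range below covers a single MSR (nmsrs = 1) and points at an
 * all-zero bitmap, so that one MSR is denied by the filter and any guest
 * access to it exits to userspace with KVM_MSR_EXIT_REASON_FILTER.
 */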
static u64 deny_bits = 0;
struct kvm_msr_filter filter_allow = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel knows about. */
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel doesn't know about. */
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test a fabricated MSR that no one knows about. */
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

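/*
 * filter_fs and filter_gs are installed by the msr_permission_bitmap test to
 * deny reads of MSR_FS_BASE and MSR_GS_BASE respectively, one at a time, to
 * verify that replacing the filter also switches which MSR is intercepted.
 */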
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};

static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };

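/*
 * In a filter range's bitmap, a set bit allows the corresponding MSR and a
 * cleared bit denies it.  prepare_bitmaps() fills the bitmaps above with ones
 * (everything allowed), then uses deny_msr() to clear the bit for each MSR
 * the msr_filter_deny test wants bounced to userspace.
 */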
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}

static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

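/*
 * With KVM_MSR_FILTER_DEFAULT_DENY, any MSR not covered by a range below is
 * denied outright.  The covered ranges allow everything except the bits
 * cleared in prepare_bitmaps(): writes to MSR_IA32_POWER_CTL and reads of
 * MSR_SYSCALL_MASK and MSR_GS_BASE, which therefore exit to userspace.
 */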
struct kvm_msr_filter filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};

struct kvm_msr_filter no_filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;

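/*
 * The "em" variants prefix the instruction with KVM_FEP, KVM's forced
 * emulation prefix, so the access goes through KVM's instruction emulator
 * instead of a hardware-accelerated RDMSR/WRMSR exit.  They are only
 * exercised when is_forced_emulation_enabled is true, i.e. when the
 * kvm.force_emulation_prefix module parameter is set.
 */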
/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;

static void guest_code_filter_allow(void)
{
	uint64_t data;

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A #GP is injected if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A #GP is injected if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

	if (is_forced_emulation_enabled) {
		/* Let userspace know we aren't done. */
		GUEST_SYNC(0);

		/*
		 * Now run the same tests with the instruction emulator.
		 */
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}

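/*
 * Guest side of the msr_filter_deny test.  With the deny filter installed,
 * reads of MSR_SYSCALL_MASK and MSR_GS_BASE and writes to MSR_IA32_POWER_CTL
 * bounce to userspace, which reflects the MSR index back as the read value;
 * accesses to the non-existent MSR 0xdeadbeef always exit to userspace.
 */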
static void guest_msr_calls(bool trapped)
{
	/* This goes into the in-kernel emulation */
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
		/* This goes into user space emulation */
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

	/* If trapped == true, this goes into user space emulation */
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

	/* This goes into the in-kernel emulation */
	rdmsr(MSR_IA32_POWER_CTL);

	/* Invalid MSR, should always be handled by user space exit */
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}

static void guest_code_filter_deny(void)
{
	guest_msr_calls(true);

	/*
	 * Disable msr filtering, so that the kernel
	 * handles everything in the next round
	 */
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}

static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

	/* Let userspace know to switch the filter */
	GUEST_SYNC(0);

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	/* Access the MSRs again to ensure KVM has disabled interception. */
	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

	GUEST_DONE();
}

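/*
 * Guest #GP handler.  The fault must hit the start label of the expected
 * rdmsr or wrmsr instruction; the handler then skips past the instruction by
 * moving RIP to the end label (zeroing RAX/RDX for a faulted rdmsr) and
 * counts the exception so the guest code can assert on it.
 */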
static void __guest_gp_handler(struct ex_regs *regs,
			       char *r_start, char *r_end,
			       char *w_start, char *w_end)
{
	if (regs->rip == (uintptr_t)r_start) {
		regs->rip = (uintptr_t)r_end;
		regs->rax = 0;
		regs->rdx = 0;
	} else if (regs->rip == (uintptr_t)w_start) {
		regs->rip = (uintptr_t)w_end;
	} else {
		GUEST_ASSERT(!"RIP is at an unknown location!");
	}

	++guest_exception_count;
}

static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}

static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}

static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
		REPORT_GUEST_ASSERT(uc);
	}
}

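/*
 * Userspace completion of an MSR exit: on KVM_EXIT_X86_RDMSR, fill in
 * run->msr.data (or set run->msr.error to make KVM inject a #GP); on
 * KVM_EXIT_X86_WRMSR, accept run->msr.data or set run->msr.error.  The
 * policies below match the assertions in guest_code_filter_allow() and
 * guest_code_permission_bitmap().
 */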
static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		run->msr.data = 0;
		break;
	case MSR_IA32_FLUSH_CMD:
		run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		run->msr.data = msr_non_existent_data;
		break;
	case MSR_FS_BASE:
		run->msr.data = MSR_FS_BASE;
		break;
	case MSR_GS_BASE:
		run->msr.data = MSR_GS_BASE;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		if (run->msr.data != 0)
			run->msr.error = 1;
		break;
	case MSR_IA32_FLUSH_CMD:
		if (run->msr.data != 1)
			run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		msr_non_existent_data = run->msr.data;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

static void process_ucall_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc = {};

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		check_for_guest_assert(vcpu);
		break;
	case UCALL_DONE:
		process_ucall_done(vcpu);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}

static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_rdmsr(vcpu, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_wrmsr(vcpu, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	return process_ucall(vcpu);
}

static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	process_ucall_done(vcpu);
}

KVM_ONE_VCPU_TEST_SUITE(user_msr);

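/*
 * Install filter_allow and verify that every filtered rdmsr/wrmsr in
 * guest_code_filter_allow() produces exactly one userspace exit, first via
 * hardware RDMSR/WRMSR exits and then, if forced emulation is available,
 * via KVM's instruction emulator.
 */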
KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t cmd;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

	/* Process guest code userspace exits. */
	run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

	vcpu_run(vcpu);
	cmd = process_ucall(vcpu);

	if (is_forced_emulation_enabled) {
		TEST_ASSERT_EQ(cmd, UCALL_SYNC);
		vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);

		/* Process emulated rdmsr and wrmsr instructions. */
		run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

		/* Confirm the guest completed without issues. */
		run_guest_then_process_ucall_done(vcpu);
	} else {
		TEST_ASSERT_EQ(cmd, UCALL_DONE);
		printf("To run the instruction-emulated tests, set the module parameter 'kvm.force_emulation_prefix=1'\n");
	}
}

static int handle_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_SYNC:
		vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}

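/*
 * MSR exits in the msr_filter_deny test: filtered MSRs must report
 * KVM_MSR_EXIT_REASON_FILTER, while the non-existent MSR 0xdeadbeef must
 * report KVM_MSR_EXIT_REASON_UNKNOWN.  Reads are answered with the MSR index
 * itself so the guest can tell a userspace-handled read from an in-kernel one.
 */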
static void handle_rdmsr(struct kvm_run *run)
{
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}

static void handle_wrmsr(struct kvm_run *run)
{
	/* ignore */
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}

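/*
 * Install filter_deny, run the guest until it signals completion, and count
 * the MSR exits serviced in userspace: four reads (MSR_SYSCALL_MASK,
 * MSR_GS_BASE and 0xdeadbeef while filtered, plus 0xdeadbeef again after the
 * filter is dropped) and three writes (MSR_IA32_POWER_CTL and 0xdeadbeef
 * while filtered, plus 0xdeadbeef unfiltered).
 */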
KVM_ONE_VCPU_TEST(user_msr, msr_filter_deny, guest_code_filter_deny)
{
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_run *run = vcpu->run;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
						      KVM_MSR_EXIT_REASON_UNKNOWN |
						      KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

	while (1) {
		vcpu_run(vcpu);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vcpu))
				goto done;
			break;
		}
	}

done:
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
}

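/*
 * Switch between filter_fs and filter_gs at runtime and verify, via
 * guest_code_permission_bitmap(), that only the MSR denied by the currently
 * installed filter is intercepted, and that installing filter_allow
 * afterwards stops the interception of both MSRs.
 */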
KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap)
{
	struct kvm_vm *vm = vcpu->vm;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
	TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
		    "Expected ucall state to be UCALL_SYNC.");
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
	run_guest_then_process_ucall_done(vcpu);
}

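/*
 * Issue the ioctl and check the result against the set of valid flag bits:
 * a flag inside valid_mask must succeed, anything else must fail with EINVAL.
 */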
#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask)	\
({									\
	int r = __vm_ioctl(vm, cmd, arg);				\
									\
	if (flag & valid_mask)						\
		TEST_ASSERT(!r, __KVM_IOCTL_ERROR(#cmd, r));		\
	else								\
		TEST_ASSERT(r == -1 && errno == EINVAL,			\
			    "Wanted EINVAL for %s with flag = 0x%llx, got rc: %i errno: %i (%s)", \
			    #cmd, flag, r, errno, strerror(errno));	\
})

static void run_user_space_msr_flag_test(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_USER_SPACE_MSR };
	int nflags = sizeof(cap.args[0]) * BITS_PER_BYTE;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");

	for (i = 0; i < nflags; i++) {
		cap.args[0] = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_ENABLE_CAP, &cap,
			   BIT_ULL(i), KVM_MSR_EXIT_REASON_VALID_MASK);
	}
}

static void run_msr_filter_flag_test(struct kvm_vm *vm)
{
	u64 deny_bits = 0;
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		.ranges = {
			{
				.flags = KVM_MSR_FILTER_READ,
				.nmsrs = 1,
				.base = 0,
				.bitmap = (uint8_t *)&deny_bits,
			},
		},
	};
	int nflags;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	nflags = sizeof(filter.flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
			   BIT_ULL(i), KVM_MSR_FILTER_VALID_MASK);
	}

	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
	nflags = sizeof(filter.ranges[0].flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.ranges[0].flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
			   BIT_ULL(i), KVM_MSR_FILTER_RANGE_VALID_MASK);
	}
}

/* Test that attempts to set unused flag bits fail with EINVAL. */
KVM_ONE_VCPU_TEST(user_msr, user_exit_msr_flags, NULL)
{
	struct kvm_vm *vm = vcpu->vm;

	/* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
	run_user_space_msr_flag_test(vm);

	/* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
	run_msr_filter_flag_test(vm);
}

int main(int argc, char *argv[])
{
	return test_harness_run(argc, argv);
}