1 #include "apic.h"
2 #include "svm.h"
3 #include "vm.h"
4 #include "alloc_page.h"
5 #include "vmalloc.h"
6
7 static void *scratch_page;
8
/* Guest body that does nothing; used when only the prepare/check matter. */
static void null_test(struct svm_test *test)
{
}
12
npt_np_prepare(struct svm_test * test)13 static void npt_np_prepare(struct svm_test *test)
14 {
15 u64 *pte;
16
17 scratch_page = alloc_page();
18 pte = npt_get_pte((u64) scratch_page);
19
20 *pte &= ~1ULL;
21 }
22
npt_np_test(struct svm_test * test)23 static void npt_np_test(struct svm_test *test)
24 {
25 (void)*(volatile u64 *)scratch_page;
26 }
27
npt_np_check(struct svm_test * test)28 static bool npt_np_check(struct svm_test *test)
29 {
30 u64 *pte = npt_get_pte((u64) scratch_page);
31
32 *pte |= 1ULL;
33
34 return (vmcb->control.exit_code == SVM_EXIT_NPF)
35 && (vmcb->control.exit_info_1 == 0x100000004ULL);
36 }
37
npt_nx_prepare(struct svm_test * test)38 static void npt_nx_prepare(struct svm_test *test)
39 {
40 u64 *pte;
41
42 test->scratch = rdmsr(MSR_EFER);
43 wrmsr(MSR_EFER, test->scratch | EFER_NX);
44
45 /* Clear the guest's EFER.NX, it should not affect NPT behavior. */
46 vmcb->save.efer &= ~EFER_NX;
47
48 pte = npt_get_pte((u64) null_test);
49
50 *pte |= PT64_NX_MASK;
51 }
52
npt_nx_check(struct svm_test * test)53 static bool npt_nx_check(struct svm_test *test)
54 {
55 u64 *pte = npt_get_pte((u64) null_test);
56
57 wrmsr(MSR_EFER, test->scratch);
58
59 *pte &= ~PT64_NX_MASK;
60
61 return (vmcb->control.exit_code == SVM_EXIT_NPF)
62 && (vmcb->control.exit_info_1 == 0x100000015ULL);
63 }
64
npt_us_prepare(struct svm_test * test)65 static void npt_us_prepare(struct svm_test *test)
66 {
67 u64 *pte;
68
69 scratch_page = alloc_page();
70 pte = npt_get_pte((u64) scratch_page);
71
72 *pte &= ~(1ULL << 2);
73 }
74
npt_us_test(struct svm_test * test)75 static void npt_us_test(struct svm_test *test)
76 {
77 (void)*(volatile u64 *)scratch_page;
78 }
79
npt_us_check(struct svm_test * test)80 static bool npt_us_check(struct svm_test *test)
81 {
82 u64 *pte = npt_get_pte((u64) scratch_page);
83
84 *pte |= (1ULL << 2);
85
86 return (vmcb->control.exit_code == SVM_EXIT_NPF)
87 && (vmcb->control.exit_info_1 == 0x100000005ULL);
88 }
89
npt_rw_prepare(struct svm_test * test)90 static void npt_rw_prepare(struct svm_test *test)
91 {
92
93 u64 *pte;
94
95 pte = npt_get_pte(0x80000);
96
97 *pte &= ~(1ULL << 1);
98 }
99
npt_rw_test(struct svm_test * test)100 static void npt_rw_test(struct svm_test *test)
101 {
102 u64 *data = (void *)(0x80000);
103
104 *data = 0;
105 }
106
npt_rw_check(struct svm_test * test)107 static bool npt_rw_check(struct svm_test *test)
108 {
109 u64 *pte = npt_get_pte(0x80000);
110
111 *pte |= (1ULL << 1);
112
113 return (vmcb->control.exit_code == SVM_EXIT_NPF)
114 && (vmcb->control.exit_info_1 == 0x100000007ULL);
115 }
116
npt_rw_pfwalk_prepare(struct svm_test * test)117 static void npt_rw_pfwalk_prepare(struct svm_test *test)
118 {
119
120 u64 *pte;
121
122 pte = npt_get_pte(read_cr3());
123
124 *pte &= ~(1ULL << 1);
125 }
126
npt_rw_pfwalk_check(struct svm_test * test)127 static bool npt_rw_pfwalk_check(struct svm_test *test)
128 {
129 u64 *pte = npt_get_pte(read_cr3());
130
131 *pte |= (1ULL << 1);
132
133 return (vmcb->control.exit_code == SVM_EXIT_NPF)
134 && (vmcb->control.exit_info_1 == 0x200000007ULL)
135 && (vmcb->control.exit_info_2 == read_cr3());
136 }
137
138 static bool was_x2apic;
139
npt_apic_prepare(void)140 static void npt_apic_prepare(void)
141 {
142 was_x2apic = is_x2apic_enabled();
143
144 if (was_x2apic)
145 reset_apic();
146 }
147
npt_apic_restore(void)148 static void npt_apic_restore(void)
149 {
150 if (was_x2apic)
151 enable_x2apic();
152
153 was_x2apic = false;
154 }
155
/* The L1 MMIO test needs the APIC in xAPIC (MMIO-accessible) mode. */
static void npt_l1mmio_prepare(struct svm_test *test)
{
	npt_apic_prepare();
}
160
161 u32 nested_apic_version1;
162 u32 nested_apic_version2;
163
npt_l1mmio_test(struct svm_test * test)164 static void npt_l1mmio_test(struct svm_test *test)
165 {
166 volatile u32 *data = (volatile void *)(0xfee00030UL);
167
168 nested_apic_version1 = *data;
169 nested_apic_version2 = *data;
170 }
171
npt_l1mmio_check(struct svm_test * test)172 static bool npt_l1mmio_check(struct svm_test *test)
173 {
174 volatile u32 *data = (volatile void *)(0xfee00030);
175 u32 lvr = *data;
176
177 /* Restore APIC state *after* reading LVR. */
178 npt_apic_restore();
179
180 return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
181 }
182
npt_rw_l1mmio_prepare(struct svm_test * test)183 static void npt_rw_l1mmio_prepare(struct svm_test *test)
184 {
185
186 u64 *pte;
187
188 npt_apic_prepare();
189
190 pte = npt_get_pte(0xfee00080);
191
192 *pte &= ~(1ULL << 1);
193 }
194
npt_rw_l1mmio_test(struct svm_test * test)195 static void npt_rw_l1mmio_test(struct svm_test *test)
196 {
197 volatile u32 *data = (volatile void *)(0xfee00080);
198
199 *data = *data;
200 }
201
npt_rw_l1mmio_check(struct svm_test * test)202 static bool npt_rw_l1mmio_check(struct svm_test *test)
203 {
204 u64 *pte = npt_get_pte(0xfee00080);
205
206 *pte |= (1ULL << 1);
207
208 npt_apic_restore();
209
210 return (vmcb->control.exit_code == SVM_EXIT_NPF)
211 && (vmcb->control.exit_info_1 == 0x100000007ULL);
212 }
213
/* Minimal guest body for the reserved-bits tests; returns immediately. */
static void basic_guest_main(struct svm_test *test)
{
}
217
/*
 * Run the guest once with @rsvd_bits ORed into the NPT entry @pxe and
 * verify a #NPF exit with the expected page-fault error code in
 * exit_info_1.  Host EFER/CR4 and guest EFER/CR4 are set as requested;
 * the original entry value is restored before returning.
 */
static void __svm_npt_rsvd_bits_test(u64 * pxe, u64 rsvd_bits, u64 efer,
				     ulong cr4, u64 guest_efer, ulong guest_cr4)
{
	u64 pxe_orig = *pxe;
	int exit_reason;
	u64 pfec;

	wrmsr(MSR_EFER, efer);
	write_cr4(cr4);

	vmcb->save.efer = guest_efer;
	vmcb->save.cr4 = guest_cr4;

	*pxe |= rsvd_bits;

	exit_reason = svm_vmrun();

	report(exit_reason == SVM_EXIT_NPF,
	       "Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits,
	       exit_reason);

	if (pxe == npt_get_pdpe((u64) basic_guest_main) || pxe == npt_get_pml4e()) {
		/*
		 * The guest's page tables will blow up on a bad PDPE/PML4E,
		 * before starting the final walk of the guest page.
		 */
		/* NB: "full" is hex digit 'f' + the ULL suffix, i.e. 0x20000000f. */
		pfec = 0x20000000full;
	} else {
		/* RSVD #NPF on final walk of guest page. */
		pfec = 0x10000000dULL;

		/* PFEC.FETCH=1 if NX=1 *or* SMEP=1. */
		if ((cr4 & X86_CR4_SMEP) || (efer & EFER_NX))
			pfec |= 0x10;

	}

	report(vmcb->control.exit_info_1 == pfec,
	       "Wanted PFEC = 0x%lx, got PFEC = %lx, PxE = 0x%lx. "
	       "host.NX = %u, host.SMEP = %u, guest.NX = %u, guest.SMEP = %u",
	       pfec, vmcb->control.exit_info_1, *pxe,
	       !!(efer & EFER_NX), !!(cr4 & X86_CR4_SMEP),
	       !!(guest_efer & EFER_NX), !!(guest_cr4 & X86_CR4_SMEP));

	/* Restore the entry so the next iteration starts clean. */
	*pxe = pxe_orig;
}
264
/*
 * Exhaustively exercise the reserved-bits #NPF path for one NPT entry,
 * iterating over all 16 combinations of host/guest EFER.NX and CR4.SMEP.
 */
static void _svm_npt_rsvd_bits_test(u64 * pxe, u64 pxe_rsvd_bits, u64 efer,
				    ulong cr4, u64 guest_efer, ulong guest_cr4)
{
	u64 rsvd_bits;
	int i;

	/*
	 * RDTSC or RDRAND can sometimes fail to generate a valid reserved bits
	 */
	if (!pxe_rsvd_bits) {
		report_skip
		    ("svm_npt_rsvd_bits_test: Reserved bits are not valid");
		return;
	}

	/*
	 * Test all combinations of guest/host EFER.NX and CR4.SMEP. If host
	 * EFER.NX=0, use NX as the reserved bit, otherwise use the passed in
	 * @pxe_rsvd_bits.
	 */
	for (i = 0; i < 16; i++) {
		/* bit 0: host EFER.NX (also selects which rsvd bit to set) */
		if (i & 1) {
			rsvd_bits = pxe_rsvd_bits;
			efer |= EFER_NX;
		} else {
			rsvd_bits = PT64_NX_MASK;
			efer &= ~EFER_NX;
		}
		/* bit 1: host CR4.SMEP */
		if (i & 2)
			cr4 |= X86_CR4_SMEP;
		else
			cr4 &= ~X86_CR4_SMEP;
		/* bit 2: guest EFER.NX */
		if (i & 4)
			guest_efer |= EFER_NX;
		else
			guest_efer &= ~EFER_NX;
		/* bit 3: guest CR4.SMEP */
		if (i & 8)
			guest_cr4 |= X86_CR4_SMEP;
		else
			guest_cr4 &= ~X86_CR4_SMEP;

		__svm_npt_rsvd_bits_test(pxe, rsvd_bits, efer, cr4,
					 guest_efer, guest_cr4);
	}
}
310
/*
 * Produce a nonzero random value confined to bits [hi:low], preferring
 * RDRAND when available and falling back to RDTSC.  May still return 0
 * if both sources repeatedly yield nothing in the mask; the caller is
 * expected to handle that (see _svm_npt_rsvd_bits_test()).
 */
static u64 get_random_bits(u64 hi, u64 low)
{
	const u64 mask = GENMASK_ULL(hi, low);
	u64 bits = 0;
	unsigned attempts;

	if (this_cpu_has(X86_FEATURE_RDRAND)) {
		for (attempts = 5; attempts && !bits; attempts--)
			bits = (rdrand() << low) & mask;
	}

	for (attempts = 5; attempts && !bits; attempts--)
		bits = (rdtsc() << low) & mask;

	return bits;
}
333
/*
 * Top-level reserved-bits test: runs the NX/SMEP matrix against each
 * level of the NPT (PTE, PDE, PDPE, PML4E), then restores host and
 * guest EFER/CR4 to their saved values.
 */
static void svm_npt_rsvd_bits_test(void)
{
	u64 saved_efer, host_efer, sg_efer, guest_efer;
	ulong saved_cr4, host_cr4, sg_cr4, guest_cr4;

	if (!npt_supported()) {
		report_skip("NPT not supported");
		return;
	}

	/* Snapshot host and guest state mutated by the sub-tests. */
	saved_efer = host_efer = rdmsr(MSR_EFER);
	saved_cr4 = host_cr4 = read_cr4();
	sg_efer = guest_efer = vmcb->save.efer;
	sg_cr4 = guest_cr4 = vmcb->save.cr4;

	test_set_guest(basic_guest_main);

	/*
	 * 4k PTEs don't have reserved bits if MAXPHYADDR >= 52, just skip the
	 * sub-test. The NX test is still valid, but the extra bit of coverage
	 * isn't worth the extra complexity.
	 */
	if (cpuid_maxphyaddr() >= 52)
		goto skip_pte_test;

	_svm_npt_rsvd_bits_test(npt_get_pte((u64) basic_guest_main),
				get_random_bits(51, cpuid_maxphyaddr()),
				host_efer, host_cr4, guest_efer, guest_cr4);

skip_pte_test:
	/* PDE: PS=1 makes bits 20:13 reserved. */
	_svm_npt_rsvd_bits_test(npt_get_pde((u64) basic_guest_main),
				get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
				host_efer, host_cr4, guest_efer, guest_cr4);

	/* PDPE: PS=1 is itself reserved without 1GB-page support. */
	_svm_npt_rsvd_bits_test(npt_get_pdpe((u64) basic_guest_main),
				PT_PAGE_SIZE_MASK |
				(this_cpu_has(X86_FEATURE_GBPAGES) ?
				 get_random_bits(29, 13) : 0), host_efer,
				host_cr4, guest_efer, guest_cr4);

	/* PML4E: bit 8 is always reserved. */
	_svm_npt_rsvd_bits_test(npt_get_pml4e(), BIT_ULL(8),
				host_efer, host_cr4, guest_efer, guest_cr4);

	/* Restore everything the matrix may have left modified. */
	wrmsr(MSR_EFER, saved_efer);
	write_cr4(saved_cr4);
	vmcb->save.efer = sg_efer;
	vmcb->save.cr4 = sg_cr4;
}
382
/* V1 tests: explicit prepare/guest/check callbacks, gated on NPT support. */
#define NPT_V1_TEST(name, prepare, guest_code, check) \
	{ #name, npt_supported, prepare, default_prepare_gif_clear, guest_code, \
	  default_finished, check }

/* V2 tests: a single self-contained function invoked via the .v2 hook. */
#define NPT_V2_TEST(name) { #name, .v2 = name }

/* Test table consumed by run_svm_tests(); NULL-terminated. */
static struct svm_test npt_tests[] = {
	NPT_V1_TEST(npt_nx, npt_nx_prepare, null_test, npt_nx_check),
	NPT_V1_TEST(npt_np, npt_np_prepare, npt_np_test, npt_np_check),
	NPT_V1_TEST(npt_us, npt_us_prepare, npt_us_test, npt_us_check),
	NPT_V1_TEST(npt_rw, npt_rw_prepare, npt_rw_test, npt_rw_check),
	NPT_V1_TEST(npt_rw_pfwalk, npt_rw_pfwalk_prepare, null_test, npt_rw_pfwalk_check),
	NPT_V1_TEST(npt_l1mmio, npt_l1mmio_prepare, npt_l1mmio_test, npt_l1mmio_check),
	NPT_V1_TEST(npt_rw_l1mmio, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test, npt_rw_l1mmio_check),
	NPT_V2_TEST(svm_npt_rsvd_bits_test),
	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};
400
main(int ac,char ** av)401 int main(int ac, char **av)
402 {
403 pteval_t opt_mask = 0;
404
405 __setup_vm(&opt_mask);
406 return run_svm_tests(ac, av, npt_tests);
407 }
408