/*
 * Intel LAM unit test
 *
 * Copyright (C) 2023 Intel
 *
 * Author: Robert Hoo <robert.hu@linux.intel.com>
 *         Binbin Wu <binbin.wu@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */
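
/*
 * LAM (Linear Address Masking) allows software to use the untranslated,
 * upper bits of a 64-bit linear address as metadata: LAM48 masks bits
 * 62:48 and LAM57 masks bits 62:57 off the canonicality check.  LAM for
 * supervisor pointers is controlled by CR4.LAM_SUP, and for user pointers
 * by CR3.LAM_U48/CR3.LAM_U57.
 */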

#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include <util.h>
#include "vmalloc.h"
#include "alloc_page.h"
#include "vm.h"
#include "asm/io.h"
#include "ioram.h"

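/*
 * CR4.LAM_SUP is reserved unless LAM is enumerated in CPUID, in which case
 * setting it should #GP; clearing it should always succeed.
 */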
static void test_cr4_lam_set_clear(void)
{
	int vector;
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
	report(has_lam ? !vector : vector == GP_VECTOR,
	       "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");

	vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
	report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
}

/* Write a value through the pointer and read it back; adapted from emulator.c. */
static void do_mov(void *mem)
{
	unsigned long t1, t2;

	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem])\n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t1 == t2, "Mov result check");
}

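/*
 * Compute the LAM metadata mask for @address and return whether LAM is
 * actually enabled for it.  Supervisor addresses (bit 63 set) use LAM57
 * with 5-level paging and LAM48 with 4-level paging; user addresses use
 * whichever of CR3.LAM_U48/CR3.LAM_U57 is enabled.
 */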
static bool get_lam_mask(u64 address, u64 *lam_mask)
{
	/*
	 * Use LAM57_MASK as the mask to construct a non-canonical address
	 * if LAM is not supported or not enabled.
	 */
	*lam_mask = LAM57_MASK;

	/*
	 * Bit 63 determines whether the address is treated as a user address
	 * or a supervisor address.
	 */
	if (address & BIT_ULL(63)) {
		if (!is_lam_sup_enabled())
			return false;

		if (!is_la57_enabled())
			*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u48_enabled()) {
		*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u57_enabled())
		return true;

	return false;
}

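/*
 * Check that an access through an untagged pointer always succeeds, and
 * that an access through a tagged pointer succeeds iff LAM applies to the
 * address, for both normal memory and (emulated) MMIO.
 */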
static void test_ptr(u64 *ptr, bool is_mmio)
{
	u64 lam_mask;
	bool lam_active, fault;

	lam_active = get_lam_mask((u64)ptr, &lam_mask);

	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(!fault, "Expected access to untagged address for %s to succeed",
	       is_mmio ? "MMIO" : "memory");

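	/*
	 * get_non_canonical() sets the metadata bits covered by lam_mask,
	 * producing an address that fails the canonicality check unless LAM
	 * masks those bits off.
	 */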
	ptr = (u64 *)get_non_canonical((u64)ptr, lam_mask);
	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
	       is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
	       lam_active ? "succeed" : "#GP");

	/*
	 * This case is only exercised when LAM_U57 is active and 4-level
	 * paging is used: bits 62:57 are then masked, but bits 56:47 must
	 * still be 0, so setting any of them triggers #GP.
	 */
	if (lam_active && (lam_mask == LAM57_MASK) && !is_la57_enabled()) {
		ptr = (u64 *)get_non_canonical((u64)ptr, LAM48_MASK);
		fault = test_for_exception(GP_VECTOR, do_mov, ptr);
		report(fault, "Expected access to non-LAM-canonical address for %s to #GP",
		       is_mmio ? "MMIO" : "memory");
	}
}

/* INVLPG with a tagged address is the same as a NOP, no #GP expected. */
static void test_invlpg(void *va, bool fep)
{
	u64 lam_mask;
	u64 *ptr;

	/*
	 * The return value of get_lam_mask() is deliberately ignored; INVLPG
	 * should never fault, regardless of whether LAM is supported or
	 * enabled.
	 */
	get_lam_mask((u64)va, &lam_mask);
	ptr = (u64 *)get_non_canonical((u64)va, lam_mask);
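	/*
	 * KVM_FEP is the forced-emulation prefix, used to force KVM to
	 * emulate the instruction rather than executing it natively.
	 */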
	if (fep)
		asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory");
	else
		invlpg(ptr);

	report_pass("Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
}

/*
 * LAM applies to the INVPCID descriptor pointer, which is an ordinary
 * memory operand, but not to the linear address inside the descriptor.
 * Exercise all four combinations of tagged/untagged pointer and address.
 */
static void test_invpcid(void *data)
{
	/*
	 * Reuse the memory address for the descriptor, since stack addresses
	 * in KUT don't follow the kernel's address space partitioning.
	 */
	struct invpcid_desc *desc_ptr = data;
	int vector;
	u64 lam_mask;
	bool lam_active;

	if (!this_cpu_has(X86_FEATURE_INVPCID)) {
		report_skip("INVPCID not supported");
		return;
	}

	lam_active = get_lam_mask((u64)data, &lam_mask);

	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
	desc_ptr->addr = (u64)data;

	vector = invpcid_safe(0, desc_ptr);
	report(!vector,
	       "Expected INVPCID with untagged pointer + untagged addr to succeed, got vector %u",
	       vector);

	desc_ptr->addr = get_non_canonical(desc_ptr->addr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with untagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = data;
	desc_ptr->addr = (u64)data;
	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(lam_active ? !vector : vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + untagged addr to %s, got vector %u",
	       lam_active ? "succeed" : "#GP", vector);
}

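/*
 * Exercise the supervisor-address paths: normal memory, MMIO (to cover
 * instruction emulation), INVPCID, and INVLPG both natively and, if
 * available, through forced emulation.
 */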
static void __test_lam_sup(void *vaddr, void *vaddr_mmio)
{
	/* Test for normal memory. */
	test_ptr(vaddr, false);
	/* Test for MMIO to trigger instruction emulation. */
	test_ptr(vaddr_mmio, true);
	test_invpcid(vaddr);
	test_invlpg(vaddr, false);
	if (is_fep_available())
		test_invlpg(vaddr, true);
}

static void test_lam_sup(void)
{
	void *vaddr, *vaddr_mmio;
	phys_addr_t paddr;
	unsigned long cr4 = read_cr4();
	int vector;

	/*
	 * KUT initializes vfree_top to 0 for X86_64, and each virtual address
	 * allocation grows downward from vfree_top.  Since only a small amount
	 * of the virtual address range is allocated in this test, the value
	 * returned by alloc_vpage() is guaranteed to be a canonical,
	 * kernel-mode address.
	 */
	vaddr = alloc_vpage();
	vaddr_mmio = alloc_vpage();
	paddr = virt_to_phys(alloc_page());
	install_page(current_page_table(), paddr, vaddr);
	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);

	test_cr4_lam_set_clear();

	/* Test without LAM Supervisor enabled. */
	__test_lam_sup(vaddr, vaddr_mmio);

	/* Test with LAM Supervisor enabled, if supported. */
	if (this_cpu_has(X86_FEATURE_LAM)) {
		vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
		report(!vector && is_lam_sup_enabled(),
		       "Expected CR4.LAM_SUP=1 to succeed");
		__test_lam_sup(vaddr, vaddr_mmio);
	}
}

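/*
 * Test LAM for user addresses (bit 63 clear) with CR3.LAM_U48 and then
 * CR3.LAM_U57, using identity-mapped memory and MMIO.
 */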
static void test_lam_user(void)
{
	void *vaddr;
	int vector;
	unsigned long cr3 = read_cr3() & ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	/*
	 * The physical address of AREA_NORMAL fits within 36 bits, so with
	 * the identity mapping the linear address is treated as a user-mode
	 * address from LAM's point of view, and the metadata bits are not
	 * used as address bits for either LAM48 or LAM57.
	 */
	vaddr = alloc_pages_flags(0, AREA_NORMAL);
	static_assert((AREA_NORMAL_PFN & GENMASK(63, 47)) == 0UL);

	/*
	 * Note, LAM doesn't have a global control bit to turn it on/off
	 * completely; whether it can be enabled depends purely on the
	 * hardware's CPUID.  That means that when EPT is on, the guest can
	 * set the LAM control bits in CR3 without causing problems even if
	 * KVM doesn't expose LAM to the guest.  This is an unfortunate
	 * virtualization hole, which KVM chooses not to plug by intercepting
	 * CR3, for performance reasons.
	 * Only enable the LAM CR3 bits when the LAM feature is exposed.
	 */
	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U48);
		report(!vector && is_lam_u48_enabled(), "Expected CR3.LAM_U48=1 to succeed");
	}
	/*
	 * Physical memory and MMIO have already been identity-mapped in
	 * setup_mmu().
	 */
	test_ptr(vaddr, false);
	test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);

	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U57);
		report(!vector && is_lam_u57_enabled(), "Expected CR3.LAM_U57=1 to succeed");

		/* If !has_lam, these accesses were already tested above; no need to repeat. */
		test_ptr(vaddr, false);
		test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);
	}
}

int main(int ac, char **av)
{
	setup_vm();

	if (!this_cpu_has(X86_FEATURE_LAM))
		report_info("This CPU doesn't support LAM");
	else
		report_info("This CPU supports LAM");

	test_lam_sup();
	test_lam_user();

	return report_summary();
}