/*
 * Intel LAM unit test
 *
 * Copyright (C) 2023 Intel
 *
 * Author: Robert Hoo <robert.hu@linux.intel.com>
 *         Binbin Wu <binbin.wu@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */

#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "vm.h"
#include "asm/io.h"
#include "ioram.h"

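/*
 * Background for the tests below (a brief summary, not taken from the
 * original source): LAM lets software carry metadata in the upper,
 * otherwise-canonical bits of a linear address. LAM48 places metadata in
 * bits 62:48 and LAM57 in bits 62:57; bit 63 still selects supervisor vs.
 * user address. For supervisor addresses, the metadata width follows the
 * paging mode (bits 62:57 with LA57, bits 62:48 otherwise). LAM48_MASK and
 * LAM57_MASK are assumed to be defined accordingly in processor.h, and
 * get_non_canonical() is assumed to fill bits under the given mask so that
 * the result is non-canonical unless LAM masks those bits off.
 */
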
static void test_cr4_lam_set_clear(void)
{
	int vector;
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
	report(has_lam ? !vector : vector == GP_VECTOR,
	       "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");

	vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
	report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
}

/* Refer to emulator.c */
static void do_mov(void *mem)
{
	unsigned long t1, t2;

	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem])\n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t1 == t2, "Mov result check");
}
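
/*
 * Usage note (summary, not from the original source): do_mov() is only run
 * via test_for_exception() below, so a #GP raised by a tagged access is
 * caught and reported rather than being fatal. When the target is the MMIO
 * mapping, the store and load are presumably handled by the instruction
 * emulator, which is the path test_ptr() wants to exercise for emulated
 * accesses.
 */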

static bool get_lam_mask(u64 address, u64 *lam_mask)
{
	/*
	 * Default to LAM57_MASK so that the constructed address is
	 * non-canonical when LAM is not supported or not enabled.
	 */
	*lam_mask = LAM57_MASK;

	/*
	 * Bit 63 determines whether the address is treated as a user address
	 * or a supervisor address.
	 */
	if (address & BIT_ULL(63)) {
		if (!is_lam_sup_enabled())
			return false;

		if (!is_la57_enabled())
			*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u48_enabled()) {
		*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u57_enabled())
		return true;

	return false;
}
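
/*
 * Illustration of how the tests below build a tagged pointer (a sketch based
 * on how get_non_canonical() is used in this file; the helper itself lives in
 * processor.h and is expected to keep the bits outside the mask and fill some
 * bits under it). With 4-level paging and LAM48_MASK, metadata lands in bits
 * 62:48, e.g.:
 *
 *	u64 va  = 0x0000456789abcdef;			// canonical
 *	u64 tag = get_non_canonical(va, LAM48_MASK);	// e.g. 0x1234456789abcdef
 *
 * Without LAM, the tagged value is simply a non-canonical address and a data
 * access through it is expected to #GP; with the matching LAM mode enabled,
 * the metadata bits are ignored for translation and the access succeeds.
 */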

static void test_ptr(u64 *ptr, bool is_mmio)
{
	u64 lam_mask;
	bool lam_active, fault;

	lam_active = get_lam_mask((u64)ptr, &lam_mask);

	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(!fault, "Expected access to untagged address for %s to succeed",
	       is_mmio ? "MMIO" : "memory");

	ptr = (u64 *)get_non_canonical((u64)ptr, lam_mask);
	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
	       is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
	       lam_active ? "succeed" : "#GP");

	/*
	 * This case is only exercised when LAM_U57 is active together with
	 * 4-level paging. In that case, bits 56:47 must be all 0; otherwise
	 * the access triggers #GP.
	 */
	if (lam_active && (lam_mask == LAM57_MASK) && !is_la57_enabled()) {
		ptr = (u64 *)get_non_canonical((u64)ptr, LAM48_MASK);
		fault = test_for_exception(GP_VECTOR, do_mov, ptr);
		report(fault, "Expected access to non-LAM-canonical address for %s to #GP",
		       is_mmio ? "MMIO" : "memory");
	}
}

/* INVLPG with a tagged address is the same as a NOP, no #GP expected. */
static void test_invlpg(void *va, bool fep)
{
	u64 lam_mask;
	u64 *ptr;

	/*
	 * The return value of get_lam_mask() is not checked: INVLPG should
	 * never fault, whether or not LAM is supported or enabled.
	 */
	get_lam_mask((u64)va, &lam_mask);
	ptr = (u64 *)get_non_canonical((u64)va, lam_mask);
	if (fep)
		asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory");
	else
		invlpg(ptr);

	report_pass("Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
}

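/*
 * For reference (architectural layout): the INVPCID descriptor is 128 bits,
 * with the PCID in bits 11:0 of the low quadword and the linear address in
 * the high quadword. struct invpcid_desc in processor.h is assumed to mirror
 * that layout, roughly (field names other than 'addr' are illustrative):
 *
 *	struct invpcid_desc {
 *		u64 pcid : 12;
 *		u64 rsv  : 52;
 *		u64 addr;
 *	};
 *
 * The test below tags the descriptor pointer and the linear address field
 * independently: LAM applies to the pointer operand, but not to the address
 * inside the descriptor.
 */
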
/* LAM doesn't apply to the linear address in the INVPCID descriptor. */
static void test_invpcid(void *data)
{
	/*
	 * Reuse the memory address for the descriptor, since stack addresses
	 * in KUT don't follow the kernel's address space partitioning.
	 */
	struct invpcid_desc *desc_ptr = data;
	int vector;
	u64 lam_mask;
	bool lam_active;

	if (!this_cpu_has(X86_FEATURE_INVPCID)) {
		report_skip("INVPCID not supported");
		return;
	}

	lam_active = get_lam_mask((u64)data, &lam_mask);

	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
	desc_ptr->addr = (u64)data;

	vector = invpcid_safe(0, desc_ptr);
	report(!vector,
	       "Expected INVPCID with untagged pointer + untagged addr to succeed, got vector %u",
	       vector);

	desc_ptr->addr = get_non_canonical(desc_ptr->addr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with untagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = data;
	desc_ptr->addr = (u64)data;
	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(lam_active ? !vector : vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + untagged addr to %s, got vector %u",
	       lam_active ? "succeed" : "#GP", vector);
}

static void __test_lam_sup(void *vaddr, void *vaddr_mmio)
{
	/* Test for normal memory. */
	test_ptr(vaddr, false);
	/* Test for MMIO to trigger instruction emulation. */
	test_ptr(vaddr_mmio, true);
	test_invpcid(vaddr);
	test_invlpg(vaddr, false);
	if (is_fep_available())
		test_invlpg(vaddr, true);
}

static void test_lam_sup(void)
{
	void *vaddr, *vaddr_mmio;
	phys_addr_t paddr;
	unsigned long cr4 = read_cr4();
	int vector;

	/*
	 * KUT initializes vfree_top to 0 for X86_64, and each virtual address
	 * allocation decreases vfree_top. Since only a small amount of the
	 * virtual address range is allocated in this test, the addresses
	 * returned by alloc_vpage() are guaranteed to be canonical,
	 * kernel-mode addresses.
	 */
	vaddr = alloc_vpage();
	vaddr_mmio = alloc_vpage();
	paddr = virt_to_phys(alloc_page());
	install_page(current_page_table(), paddr, vaddr);
	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);

	test_cr4_lam_set_clear();

	/* Test without LAM Supervisor enabled. */
	__test_lam_sup(vaddr, vaddr_mmio);

	/* Test with LAM Supervisor enabled, if supported. */
	if (this_cpu_has(X86_FEATURE_LAM)) {
		vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
		report(!vector && is_lam_sup_enabled(),
		       "Expected CR4.LAM_SUP=1 to succeed");
		__test_lam_sup(vaddr, vaddr_mmio);
	}
}

static void test_lam_user(void)
{
	void *vaddr;
	int vector;
	unsigned long cr3 = read_cr3() & ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	/*
	 * The physical address of AREA_NORMAL fits within 36 bits, so with
	 * identity mapping the linear address is treated as a user-mode
	 * address from LAM's point of view, and the metadata bits are not
	 * used as address bits for either LAM48 or LAM57.
	 */
	vaddr = alloc_pages_flags(0, AREA_NORMAL);
	_Static_assert((AREA_NORMAL_PFN & GENMASK(63, 47)) == 0UL,
		       "Identity mapping range check");

	/*
	 * Note, LAM has no global control bit to turn it on/off completely;
	 * whether it can be enabled depends purely on the hardware's CPUID.
	 * That means, with EPT on, even when KVM doesn't expose LAM to the
	 * guest, the guest can still set the LAM control bits in CR3 without
	 * causing problems. This is an unfortunate virtualization hole. KVM
	 * chooses not to intercept CR3 in this case for performance reasons.
	 * Only enable the LAM CR3 bits when the LAM feature is exposed.
	 */
	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U48);
		report(!vector && is_lam_u48_enabled(), "Expected CR3.LAM_U48=1 to succeed");
	}
	/*
	 * Physical memory & MMIO have already been identity mapped in
	 * setup_mmu().
	 */
	test_ptr(vaddr, false);
	test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);

	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U57);
		report(!vector && is_lam_u57_enabled(), "Expected CR3.LAM_U57=1 to succeed");

		/* If !has_lam, these cases were already tested above, no need to test again. */
		test_ptr(vaddr, false);
		test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);
	}
}

int main(int ac, char **av)
{
	setup_vm();

	if (!this_cpu_has(X86_FEATURE_LAM))
		report_info("This CPU doesn't support LAM\n");
	else
		report_info("This CPU supports LAM\n");

	test_lam_sup();
	test_lam_user();

	return report_summary();
}