/*
 * Intel LAM unit test
 *
 * Copyright (C) 2023 Intel
 *
 * Author: Robert Hoo <robert.hu@linux.intel.com>
 *         Binbin Wu <binbin.wu@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */

#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "vm.h"
#include "asm/io.h"
#include "ioram.h"

static void test_cr4_lam_set_clear(void)
{
	int vector;
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
	report(has_lam ? !vector : vector == GP_VECTOR,
	       "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");

	vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
	report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
}
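
/*
 * Background: with CR4.LAM_SUP set, the CPU masks the metadata bits of a
 * supervisor pointer before the canonicality check: bits 62:48 under
 * 4-level paging (LAM48) or bits 62:57 under 5-level paging (LAM57).
 * Bit 63 still selects supervisor (1) vs. user (0) space. A minimal
 * sketch of the helpers this file relies on, assuming definitions along
 * these lines in lib/x86/processor.h (the exact upstream definitions may
 * differ):
 *
 *	#define LAM48_MASK	GENMASK_ULL(62, 48)
 *	#define LAM57_MASK	GENMASK_ULL(62, 57)
 *
 *	static inline bool is_lam_sup_enabled(void)
 *	{
 *		return !!(read_cr4() & X86_CR4_LAM_SUP);
 *	}
 */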

/*
 * Copied from emulator.c; the "& -1ul" truncation is a no-op on 64-bit
 * builds but is kept for parity with the original.
 */
static void do_mov(void *mem)
{
	unsigned long t1, t2;

	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem])\n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t1 == t2, "Mov result check");
}

static bool get_lam_mask(u64 address, u64 *lam_mask)
{
	/*
	 * Use LAM57_MASK as the mask to construct a non-canonical address
	 * when LAM is not supported or not enabled.
	 */
	*lam_mask = LAM57_MASK;

	/*
	 * Bit 63 determines whether the address is treated as a user
	 * address or a supervisor address.
	 */
	if (address & BIT_ULL(63)) {
		if (!is_lam_sup_enabled())
			return false;

		if (!is_la57_enabled())
			*lam_mask = LAM48_MASK;
		return true;
	}

	/* TODO: Get LAM mask for userspace address. */
	return false;
}
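
/*
 * For reference, a sketch of how the tagged (non-canonical) addresses
 * below are presumably built, assuming a helper of this shape in
 * lib/x86/processor.h: keep the non-metadata bits of the original
 * address and force a pattern into the bits selected by the LAM mask.
 *
 *	static inline u64 get_non_canonical(u64 va, u64 mask)
 *	{
 *		return (va & ~mask) | (NONCANONICAL & mask);
 *	}
 *
 * Without LAM, such an address fails the canonicality check and the
 * access takes #GP; with LAM active, the masked bits are ignored and
 * the access behaves as if the tag were absent.
 */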

static void test_ptr(u64 *ptr, bool is_mmio)
{
	u64 lam_mask;
	bool lam_active, fault;

	lam_active = get_lam_mask((u64)ptr, &lam_mask);

	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(!fault, "Expected access to untagged address for %s to succeed",
	       is_mmio ? "MMIO" : "memory");

	ptr = (u64 *)get_non_canonical((u64)ptr, lam_mask);
	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
	       is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
	       lam_active ? "succeed" : "#GP");
}
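
/*
 * Note on the MMIO variant: the ioram page is backed by MMIO, so loads
 * and stores to it exit to the hypervisor and are handled by KVM's
 * instruction emulator. test_ptr() with is_mmio=true therefore covers
 * the emulator's LAM untagging, while the normal-memory variant covers
 * the hardware translation path.
 */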

/* INVLPG with a tagged address is the same as a NOP; no #GP is expected. */
static void test_invlpg(void *va, bool fep)
{
	u64 lam_mask;
	u64 *ptr;

	/*
	 * The return value is deliberately ignored: INVLPG should never
	 * fault, whether or not LAM is supported or enabled.
	 */
	get_lam_mask((u64)va, &lam_mask);
	ptr = (u64 *)get_non_canonical((u64)va, lam_mask);
	if (fep)
		asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory");
	else
		invlpg(ptr);

	report_pass("Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
}
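
/*
 * Background on KVM_FEP: kvm-unit-tests prefixes the instruction with a
 * magic "forced emulation prefix" (defined along the lines of
 * "ud2; .byte 'k', 'v', 'm';" in lib/x86/processor.h), which KVM decodes
 * and emulates when its forced-emulation support is enabled. This covers
 * the emulator's untagging of the INVLPG operand as well.
 */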

/* LAM doesn't apply to the linear address in the INVPCID descriptor. */
static void test_invpcid(void *data)
{
	/*
	 * Reuse the allocated memory for the descriptor, since stack
	 * addresses in KUT don't follow the kernel's address space
	 * partitioning.
	 */
	struct invpcid_desc *desc_ptr = data;
	int vector;
	u64 lam_mask;
	bool lam_active;

	if (!this_cpu_has(X86_FEATURE_INVPCID)) {
		report_skip("INVPCID not supported");
		return;
	}

	lam_active = get_lam_mask((u64)data, &lam_mask);

	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
	desc_ptr->addr = (u64)data;

	vector = invpcid_safe(0, desc_ptr);
	report(!vector,
	       "Expected INVPCID with untagged pointer + untagged addr to succeed, got vector %u",
	       vector);

	desc_ptr->addr = get_non_canonical(desc_ptr->addr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with untagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = data;
	desc_ptr->addr = (u64)data;
	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(lam_active ? !vector : vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + untagged addr to %s, got vector %u",
	       lam_active ? "succeed" : "#GP", vector);
}
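
/*
 * For reference, the INVPCID descriptor is 128 bits: PCID in bits 11:0,
 * reserved bits 63:12 (must be zero), and the linear address in the
 * upper 64 bits; a definition along these lines is assumed in
 * lib/x86/processor.h:
 *
 *	struct invpcid_desc {
 *		u64 pcid : 12;
 *		u64 rsv  : 52;
 *		u64 addr : 64;
 *	};
 *
 * LAM untagging applies to the pointer operand of INVPCID (desc_ptr)
 * but never to the linear address inside the descriptor, which is why a
 * tagged desc_ptr->addr always takes #GP above: a type 0 (individual
 * address) invalidation requires that address to be canonical.
 */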

static void __test_lam_sup(void *vaddr, void *vaddr_mmio)
{
	/* Test normal memory (hardware translation path). */
	test_ptr(vaddr, false);
	/* Test MMIO, which triggers instruction emulation in KVM. */
	test_ptr(vaddr_mmio, true);
	test_invpcid(vaddr);
	test_invlpg(vaddr, false);
	if (is_fep_available())
		test_invlpg(vaddr, true);
}

static void test_lam_sup(void)
{
	void *vaddr, *vaddr_mmio;
	phys_addr_t paddr;
	unsigned long cr4 = read_cr4();
	int vector;

	/*
	 * KUT initializes vfree_top to 0 for x86_64 and allocates virtual
	 * addresses downward from there, so the addresses returned by
	 * alloc_vpage() are guaranteed to have bit 63 set (i.e. to be
	 * treated as supervisor addresses) and to stay canonical, since
	 * only a small amount of the virtual address range is allocated
	 * by this test.
	 */
	vaddr = alloc_vpage();
	vaddr_mmio = alloc_vpage();
	paddr = virt_to_phys(alloc_page());
	install_page(current_page_table(), paddr, vaddr);
	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);

	test_cr4_lam_set_clear();

	/* Test without LAM Supervisor enabled. */
	__test_lam_sup(vaddr, vaddr_mmio);

	/* Test with LAM Supervisor enabled, if supported. */
	if (this_cpu_has(X86_FEATURE_LAM)) {
		vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
		report(!vector && is_lam_sup_enabled(),
		       "Expected CR4.LAM_SUP=1 to succeed");
		__test_lam_sup(vaddr, vaddr_mmio);
	}
}

int main(int ac, char **av)
{
	setup_vm();

	if (!this_cpu_has(X86_FEATURE_LAM))
		report_info("This CPU doesn't support LAM");
	else
		report_info("This CPU supports LAM");

	test_lam_sup();

	return report_summary();
}
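
/*
 * Usage: typically run via the kvm-unit-tests runner, e.g.
 *	./x86/run x86/lam.flat -cpu host
 * The exact QEMU invocation varies by setup; exercising the LAM-enabled
 * paths requires a CPU model that advertises the LAM feature bit.
 */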