xref: /kvm-unit-tests/x86/lam.c (revision dca3f4c041143c8e8dc70c6890a19a5730310230)
/*
 * Intel LAM unit test
 *
 * Copyright (C) 2023 Intel
 *
 * Author: Robert Hoo <robert.hu@linux.intel.com>
 *         Binbin Wu <binbin.wu@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */

#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "vm.h"
#include "asm/io.h"
#include "ioram.h"

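/*
 * CR4.LAM_SUP (bit 28) enables LAM for supervisor-mode pointers. Setting
 * it is expected to #GP when the CPU doesn't enumerate LAM in CPUID.
 */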
static void test_cr4_lam_set_clear(void)
{
	int vector;
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
	report(has_lam ? !vector : vector == GP_VECTOR,
	       "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");

	vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
	report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
}

/* Refer to emulator.c */
static void do_mov(void *mem)
{
	unsigned long t1, t2;

	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem])\n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t1 == t2, "Mov result check");
}

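/*
 * Architecturally, LAM48 treats bits 62:48 of a pointer as metadata and
 * LAM57 treats bits 62:57; bit 63 is excluded so that it still selects
 * user vs. supervisor addresses. LAM48_MASK and LAM57_MASK are assumed to
 * cover exactly those bit ranges.
 */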
static bool get_lam_mask(u64 address, u64 *lam_mask)
{
	/*
	 * Use LAM57_MASK as the mask to construct a non-canonical address if
	 * LAM is not supported or not enabled.
	 */
	*lam_mask = LAM57_MASK;

	/*
	 * Bit 63 determines whether the address should be treated as a user
	 * address or a supervisor address.
	 */
	if (address & BIT_ULL(63)) {
		if (!is_lam_sup_enabled())
			return false;

		if (!is_la57_enabled())
			*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u48_enabled()) {
		*lam_mask = LAM48_MASK;
		return true;
	}

	if (is_lam_u57_enabled())
		return true;

	return false;
}


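/*
 * A sketch of the tagging below (hypothetical values), assuming
 * get_non_canonical() overlays a non-canonical pattern on the bits
 * selected by the mask:
 *
 *	u64 va  = 0x00007000deadb000ull;	// canonical user address
 *	u64 tag = va | LAM57_MASK;		// bits 62:57 now non-zero
 *
 * With LAM57 active, dereferencing 'tag' behaves like 'va'; without LAM,
 * 'tag' is non-canonical and the access takes a #GP.
 */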
static void test_ptr(u64 *ptr, bool is_mmio)
{
	u64 lam_mask;
	bool lam_active, fault;

	lam_active = get_lam_mask((u64)ptr, &lam_mask);

	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(!fault, "Expected access to untagged address for %s to succeed",
	       is_mmio ? "MMIO" : "memory");

	ptr = (u64 *)get_non_canonical((u64)ptr, lam_mask);
	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
	       is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
	       lam_active ? "succeed" : "#GP");

	/*
	 * This test case is only triggered when LAM_U57 is active and 4-level
	 * paging is used. In that case, an address with bits 56:47 not all 0
	 * triggers a #GP.
	 */
	if (lam_active && (lam_mask == LAM57_MASK) && !is_la57_enabled()) {
		ptr = (u64 *)get_non_canonical((u64)ptr, LAM48_MASK);
		fault = test_for_exception(GP_VECTOR, do_mov, ptr);
		report(fault, "Expected access to non-LAM-canonical address for %s to #GP",
		       is_mmio ? "MMIO" : "memory");
	}
}

/* INVLPG with a tagged address is the same as a NOP; no #GP expected. */
static void test_invlpg(void *va, bool fep)
{
	u64 lam_mask;
	u64 *ptr;

	/*
	 * The return value is not checked; INVLPG should never fault,
	 * regardless of whether LAM is supported or enabled.
	 */
	get_lam_mask((u64)va, &lam_mask);
	ptr = (u64 *)get_non_canonical((u64)va, lam_mask);
	if (fep)
		asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory");
	else
		invlpg(ptr);

	report_pass("Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
}

/* LAM doesn't apply to the linear address in the INVPCID descriptor. */
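/*
 * The descriptor is architecturally 128 bits: PCID in bits 11:0 of the
 * low qword, reserved bits above it, and the linear address in the high
 * qword (struct invpcid_desc is assumed to mirror that layout). LAM
 * untags the pointer *to* the descriptor, which is an ordinary memory
 * operand, but never the address field stored inside it.
 */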
static void test_invpcid(void *data)
{
	/*
	 * Reuse the memory address for the descriptor, since stack memory
	 * addresses in KUT don't follow the kernel address space partitioning.
	 */
	struct invpcid_desc *desc_ptr = data;
	int vector;
	u64 lam_mask;
	bool lam_active;

	if (!this_cpu_has(X86_FEATURE_INVPCID)) {
		report_skip("INVPCID not supported");
		return;
	}

	lam_active = get_lam_mask((u64)data, &lam_mask);

	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
	desc_ptr->addr = (u64)data;

	vector = invpcid_safe(0, desc_ptr);
	report(!vector,
	       "Expected INVPCID with untagged pointer + untagged addr to succeed, got vector %u",
	       vector);

	desc_ptr->addr = get_non_canonical(desc_ptr->addr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with untagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = data;
	desc_ptr->addr = (u64)data;
	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(lam_active ? !vector : vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + untagged addr to %s, got vector %u",
	       lam_active ? "succeed" : "#GP", vector);
}

static void __test_lam_sup(void *vaddr, void *vaddr_mmio)
{
	/* Test for normal memory. */
	test_ptr(vaddr, false);
	/* Test for MMIO to trigger instruction emulation. */
	test_ptr(vaddr_mmio, true);
	test_invpcid(vaddr);
	test_invlpg(vaddr, false);
	if (is_fep_available())
		test_invlpg(vaddr, true);
}

static void test_lam_sup(void)
{
	void *vaddr, *vaddr_mmio;
	phys_addr_t paddr;
	unsigned long cr4 = read_cr4();
	int vector;

	/*
	 * KUT initializes vfree_top to 0 for X86_64, and each virtual address
	 * allocation grows downward from vfree_top. The return value of
	 * alloc_vpage() is therefore guaranteed to be a canonical, kernel-mode
	 * address, since only a small amount of the virtual address range is
	 * allocated in this test.
	 */
	vaddr = alloc_vpage();
	vaddr_mmio = alloc_vpage();
	paddr = virt_to_phys(alloc_page());
	install_page(current_page_table(), paddr, vaddr);
	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);

	test_cr4_lam_set_clear();

	/* Test without LAM Supervisor enabled. */
	__test_lam_sup(vaddr, vaddr_mmio);

	/* Test with LAM Supervisor enabled, if supported. */
	if (this_cpu_has(X86_FEATURE_LAM)) {
		vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
		report(!vector && is_lam_sup_enabled(),
		       "Expected CR4.LAM_SUP=1 to succeed");
		__test_lam_sup(vaddr, vaddr_mmio);
	}
}

static void test_lam_user(void)
{
	void *vaddr;
	int vector;
	unsigned long cr3 = read_cr3() & ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	/*
	 * The physical address of AREA_NORMAL fits within 36 bits, so with
	 * identity mapping the linear address is treated as a user-mode
	 * address from LAM's point of view, and the metadata bits are not
	 * used as address bits for either LAM48 or LAM57.
	 */
	vaddr = alloc_pages_flags(0, AREA_NORMAL);
	_Static_assert((AREA_NORMAL_PFN & GENMASK(63, 47)) == 0UL,
			"Identity mapping range check");

	/*
	 * Note, LAM doesn't have a global control bit to turn it on/off
	 * completely; whether it can be enabled depends purely on the
	 * hardware's CPUID. That means that when EPT is on, even if KVM
	 * doesn't expose LAM to the guest, the guest can still set the LAM
	 * control bits in CR3 without causing problems. This is an
	 * unfortunate virtualization hole, and KVM chooses not to intercept
	 * CR3 in this case for performance reasons.
	 * Only enable the LAM CR3 bits when the LAM feature is exposed.
	 */
	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U48);
		report(!vector && is_lam_u48_enabled(), "Expected CR3.LAM_U48=1 to succeed");
	}
	/*
	 * Physical memory & MMIO have already been identity mapped in
	 * setup_mmu().
	 */
	test_ptr(vaddr, false);
	test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);

	if (has_lam) {
		vector = write_cr3_safe(cr3 | X86_CR3_LAM_U57);
		report(!vector && is_lam_u57_enabled(), "Expected CR3.LAM_U57=1 to succeed");

		/* If !has_lam, this was already covered above; no need to test again. */
		test_ptr(vaddr, false);
		test_ptr(phys_to_virt(IORAM_BASE_PHYS), true);
	}
}

int main(int ac, char **av)
{
	setup_vm();

	if (!this_cpu_has(X86_FEATURE_LAM))
		report_info("This CPU doesn't support LAM\n");
	else
		report_info("This CPU supports LAM\n");

	test_lam_sup();
	test_lam_user();

	return report_summary();
}