/*
 * Intel LAM unit test
 *
 * Copyright (C) 2023 Intel
 *
 * Author: Robert Hoo <robert.hu@linux.intel.com>
 *         Binbin Wu <binbin.wu@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or
 * later.
 */

#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "vm.h"
#include "asm/io.h"
#include "ioram.h"

static void test_cr4_lam_set_clear(void)
{
	int vector;
	bool has_lam = this_cpu_has(X86_FEATURE_LAM);

	vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
	report(has_lam ? !vector : vector == GP_VECTOR,
	       "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");

	vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
	report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
}

/* Write then read back a value; refer to emulator.c. */
static void do_mov(void *mem)
{
	unsigned long t1, t2;

	/* The "& -1ul" truncates to the native word size (a no-op on 64-bit). */
	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem])\n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t1 == t2, "Mov result check");
}

/* Compute the LAM mask for @address and return whether LAM is active for it. */
static bool get_lam_mask(u64 address, u64 *lam_mask)
{
	/*
	 * Use LAM57_MASK by default to construct a non-canonical address if
	 * LAM is not supported or not enabled.
	 */
	*lam_mask = LAM57_MASK;

	/*
	 * Bit 63 determines if the address should be treated as a user
	 * address or a supervisor address.
	 */
	if (address & BIT_ULL(63)) {
		if (!is_lam_sup_enabled())
			return false;

		if (!is_la57_enabled())
			*lam_mask = LAM48_MASK;
		return true;
	}

	/* TODO: Get LAM mask for userspace address. */
	return false;
}
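
/*
 * For reference, a minimal sketch of how a tagged (here: non-canonical)
 * pointer can be formed. This helper is hypothetical and not used by the
 * test, which relies on get_non_canonical() from the KUT library instead.
 * With LAM active, the CPU ignores the metadata bits selected by the mask
 * (bits 62:57 for LAM57, bits 62:48 for LAM48); without LAM, the result is
 * non-canonical and dereferencing it generates #GP.
 */
static inline u64 make_tagged_addr(u64 addr, u64 lam_mask)
{
	/* Flipping the masked bits of a canonical address makes it non-canonical. */
	return addr ^ lam_mask;
}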

static void test_ptr(u64 *ptr, bool is_mmio)
{
	u64 lam_mask;
	bool lam_active, fault;

	lam_active = get_lam_mask((u64)ptr, &lam_mask);

	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(!fault, "Expected access to untagged address for %s to succeed",
	       is_mmio ? "MMIO" : "memory");

	ptr = (u64 *)get_non_canonical((u64)ptr, lam_mask);
	fault = test_for_exception(GP_VECTOR, do_mov, ptr);
	report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
	       is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
	       lam_active ? "succeed" : "#GP");
}

/* INVLPG with a tagged address is the same as a NOP; no #GP is expected. */
static void test_invlpg(void *va, bool fep)
{
	u64 lam_mask;
	u64 *ptr;

	/*
	 * The return value is not checked; INVLPG should never fault,
	 * regardless of whether LAM is supported or enabled.
	 */
	get_lam_mask((u64)va, &lam_mask);
	ptr = (u64 *)get_non_canonical((u64)va, lam_mask);
	if (fep)
		asm volatile(KVM_FEP "invlpg (%0)" :: "r" (ptr) : "memory");
	else
		invlpg(ptr);

	report_pass("Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
}
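
/*
 * For reference, the INVPCID descriptor layout assumed by invpcid_safe().
 * This mirrors the SDM definition; the sketch below is illustrative, and the
 * test itself only relies on struct invpcid_desc from the KUT library:
 *
 *	struct invpcid_desc {
 *		u64 pcid : 12;	// PCID to invalidate
 *		u64 rsv  : 52;	// reserved, must be zero
 *		u64 addr;	// linear address (used by type 0)
 *	};
 */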
/* LAM doesn't apply to the linear address in the descriptor of INVPCID. */
static void test_invpcid(void *data)
{
	/*
	 * Reuse the memory address for the descriptor, since stack memory
	 * addresses in KUT don't follow the kernel address space partitions.
	 */
	struct invpcid_desc *desc_ptr = data;
	int vector;
	u64 lam_mask;
	bool lam_active;

	if (!this_cpu_has(X86_FEATURE_INVPCID)) {
		report_skip("INVPCID not supported");
		return;
	}

	lam_active = get_lam_mask((u64)data, &lam_mask);

	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
	desc_ptr->addr = (u64)data;

	vector = invpcid_safe(0, desc_ptr);
	report(!vector,
	       "Expected INVPCID with untagged pointer + untagged addr to succeed, got vector %u",
	       vector);

	desc_ptr->addr = get_non_canonical(desc_ptr->addr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with untagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + tagged addr to #GP, got vector %u",
	       vector);

	desc_ptr = data;
	desc_ptr->addr = (u64)data;
	desc_ptr = (void *)get_non_canonical((u64)desc_ptr, lam_mask);
	vector = invpcid_safe(0, desc_ptr);
	report(lam_active ? !vector : vector == GP_VECTOR,
	       "Expected INVPCID with tagged pointer + untagged addr to %s, got vector %u",
	       lam_active ? "succeed" : "#GP", vector);
}

static void __test_lam_sup(void *vaddr, void *vaddr_mmio)
{
	/* Test for normal memory. */
	test_ptr(vaddr, false);
	/* Test for MMIO, which exits to KVM and triggers instruction emulation. */
	test_ptr(vaddr_mmio, true);
	test_invpcid(vaddr);
	test_invlpg(vaddr, false);
	if (is_fep_available())
		test_invlpg(vaddr, true);
}
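
/*
 * For reference, supervisor-pointer masking as architected (per the Intel
 * SDM's LAM definition):
 *
 *	CR4.LAM_SUP  CR4.LA57  Masked metadata bits
 *	    0           x      none (tagged addresses #GP)
 *	    1           0      62:48 (LAM48)
 *	    1           1      62:57 (LAM57)
 */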
static void test_lam_sup(void)
{
	void *vaddr, *vaddr_mmio;
	phys_addr_t paddr;
	unsigned long cr4 = read_cr4();
	int vector;

	/*
	 * KUT initializes vfree_top to 0 for x86_64, and each virtual address
	 * allocation grows downward from vfree_top. This guarantees that the
	 * return value of alloc_vpage() is a canonical, kernel-mode address
	 * (bit 63 set), since only a small amount of the virtual address
	 * range is allocated in this test.
	 */
	vaddr = alloc_vpage();
	vaddr_mmio = alloc_vpage();
	paddr = virt_to_phys(alloc_page());
	install_page(current_page_table(), paddr, vaddr);
	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);

	test_cr4_lam_set_clear();

	/* Test without LAM Supervisor enabled. */
	__test_lam_sup(vaddr, vaddr_mmio);

	/* Test with LAM Supervisor enabled, if supported. */
	if (this_cpu_has(X86_FEATURE_LAM)) {
		vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
		report(!vector && is_lam_sup_enabled(),
		       "Expected CR4.LAM_SUP=1 to succeed");
		__test_lam_sup(vaddr, vaddr_mmio);
	}
}

int main(int ac, char **av)
{
	setup_vm();

	if (!this_cpu_has(X86_FEATURE_LAM))
		report_info("This CPU doesn't support LAM");
	else
		report_info("This CPU supports LAM");

	test_lam_sup();

	return report_summary();
}
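
/*
 * Usage note (an assumption, not part of the original file): as a KUT x86
 * test this builds to lam.flat and can be run standalone, e.g.:
 *
 *	./x86/run x86/lam.flat -cpu host
 *
 * LAM is enumerated by CPUID.(EAX=7,ECX=1):EAX[26], so the guest CPU model
 * must expose the feature for the positive cases to be exercised.
 */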