/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * EDAT test.
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *	Claudio Imbrenda <imbrenda@linux.ibm.com>
 */
#include <libcflat.h>
#include <vmalloc.h>
#include <asm/facility.h>
#include <asm/interrupt.h>
#include <mmu.h>
#include <asm/pgtable.h>
#include <asm-generic/barrier.h>

#define PGD_PAGE_SHIFT	(REGION1_SHIFT - PAGE_SHIFT)

#define LC_SIZE	(2 * PAGE_SIZE)
#define VIRT(x)	((void *)((unsigned long)(x) + (unsigned long)mem))

static uint8_t prefix_buf[LC_SIZE] __attribute__((aligned(LC_SIZE)));
static unsigned int tmp[1024] __attribute__((aligned(PAGE_SIZE)));
static void *root, *mem, *m;
/* Zero-initialized on purpose: the lowcore is reached through (prefixed) address 0 */
static struct lowcore *lc;
volatile unsigned int *p;

/*
 * Check whether a non-access-list protection exception happened for the
 * given address, in the primary address space.
 */
static bool check_pgm_prot(void *ptr)
{
	union teid teid;

	if (lc->pgm_int_code != PGM_INT_CODE_PROTECTION)
		return false;

	teid.val = lc->trans_exc_id;

	/*
	 * Depending on the presence of the ESOP feature, the remaining
	 * fields might or might not be meaningful when the m field is 0.
	 */
	if (!teid.m)
		return true;
	return (!teid.acc_list_prot && !teid.asce_id &&
		(teid.addr == ((unsigned long)ptr >> PAGE_SHIFT)));
}

static void test_dat(void)
{
	report_prefix_push("edat off");
	/* Disable EDAT */
	ctl_clear_bit(0, CTL0_EDAT);

	/* Check some basics */
	p[0] = 42;
	report(p[0] == 42, "pte, r/w");
	p[0] = 0;

	/* Write-protect the page and try to write, expect a fault */
	protect_page(m, PAGE_ENTRY_P);
	expect_pgm_int();
	p[0] = 42;
	unprotect_page(m, PAGE_ENTRY_P);
	report(!p[0] && check_pgm_prot(m), "pte, ro");

	/*
	 * The FC bit (for large pages) should be ignored because EDAT is
	 * off. We set a value, then try to read it back after setting the
	 * FC bit. This way we can check whether large pages were
	 * erroneously enabled despite EDAT being off.
	 */
	p[0] = 42;
	protect_dat_entry(m, SEGMENT_ENTRY_FC, pgtable_level_pmd);
	report(p[0] == 42, "pmd, fc=1, r/w");
	unprotect_dat_entry(m, SEGMENT_ENTRY_FC, pgtable_level_pmd);
	p[0] = 0;

	/*
	 * Segment protection should work even with EDAT off; try to write
	 * anyway and expect a fault.
	 */
	protect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pmd, ro");
	unprotect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);

	/* The FC bit should be ignored because EDAT is off, as above */
	p[0] = 42;
	protect_dat_entry(m, REGION3_ENTRY_FC, pgtable_level_pud);
	report(p[0] == 42, "pud, fc=1, r/w");
	unprotect_dat_entry(m, REGION3_ENTRY_FC, pgtable_level_pud);
	p[0] = 0;

	/*
	 * Region1/2/3 protection should not work, because EDAT is off.
	 * Protect the various region1/2/3 entries and write; expect the
	 * writes to succeed.
	 */
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	p[0] = 42;
	report(p[0] == 42, "pud, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	p[0] = 0;

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	p[0] = 42;
	report(p[0] == 42, "p4d, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	p[0] = 0;

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	p[0] = 42;
	report(p[0] == 42, "pgd, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	p[0] = 0;

	report_prefix_pop();
}
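
/*
 * The protect / expect_pgm_int / write / report sequence above repeats
 * once per translation-table level. A minimal sketch of how it could be
 * factored out is shown below; expect_write_prot() is a hypothetical
 * helper, not part of this test, and the level argument is simply
 * forwarded to protect_dat_entry() (e.g. pgtable_level_pmd).
 */
static inline void expect_write_prot(void *addr, unsigned long prot,
				     int level, const char *msg)
{
	volatile unsigned int *ptr = addr;

	/* Set the protection bit in the DAT entry at the given level */
	protect_dat_entry(addr, prot, level);
	expect_pgm_int();
	/* This write must fault and leave the memory untouched */
	ptr[0] = 42;
	report(!ptr[0] && check_pgm_prot(addr), "%s", msg);
	unprotect_dat_entry(addr, prot, level);
}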

static void test_edat1(void)
{
	report_prefix_push("edat1");
	/* Enable EDAT */
	ctl_set_bit(0, CTL0_EDAT);
	p[0] = 0;

	/*
	 * Segment protection should work normally; try to write and
	 * expect a fault.
	 */
	protect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pmd, ro");
	unprotect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);

	/*
	 * Region1/2/3 protection should work now, because EDAT is on. Try
	 * to write anyway and expect a fault.
	 */
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pud, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "p4d, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pgd, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);

	/* Large pages should work */
	p[0] = 42;
	install_large_page(root, 0, mem);
	report(p[0] == 42, "pmd, large");

	/*
	 * Prefixing should not work with large pages. Since the lower
	 * addresses are mapped with small pages, which are subject to
	 * prefixing, while the addresses mapped with large pages are not
	 * subject to prefixing, this is the resulting scenario:
	 *
	 * virtual 0 = real 0 -> absolute prefix_buf
	 * virtual prefix_buf = real prefix_buf -> absolute 0
	 * VIRT(0) -> absolute 0
	 * VIRT(prefix_buf) -> absolute prefix_buf
	 *
	 * The test checks whether the memory at virtual 0 has the same
	 * content as the memory at VIRT(prefix_buf), and whether the
	 * memory at VIRT(0) has the same content as the memory at virtual
	 * prefix_buf. If prefixing is erroneously applied for large
	 * pages, the test will therefore fail.
	 */
	report(!memcmp(0, VIRT(prefix_buf), LC_SIZE) &&
	       !memcmp(prefix_buf, VIRT(0), LC_SIZE),
	       "pmd, large, prefixing");

	report_prefix_pop();
}
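
/*
 * The double memcmp() above verifies both directions of the prefix swap
 * at once. A minimal sketch of that check as a standalone helper is
 * shown below, assuming the mapping set up in setup();
 * prefix_swap_visible() is hypothetical and not used by this test.
 */
static inline bool prefix_swap_visible(void)
{
	/*
	 * Virtual 0 and virtual prefix_buf go through small pages and are
	 * prefixed; VIRT(0) and VIRT(prefix_buf) go through large (or
	 * huge) pages and are not, so each pair must compare equal.
	 */
	return !memcmp(0, VIRT(prefix_buf), LC_SIZE) &&
	       !memcmp(prefix_buf, VIRT(0), LC_SIZE);
}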

static void test_edat2(void)
{
	report_prefix_push("edat2");
	p[0] = 42;

	/* Huge pages should work */
	install_huge_page(root, 0, mem);
	report(p[0] == 42, "pud, huge");

	/* Prefixing should not work with huge pages, just like large pages */
	report(!memcmp(0, VIRT(prefix_buf), LC_SIZE) &&
	       !memcmp(prefix_buf, VIRT(0), LC_SIZE),
	       "pud, huge, prefixing");

	report_prefix_pop();
}

static unsigned int setup(void)
{
	bool has_edat1 = test_facility(8);
	bool has_edat2 = test_facility(78);
	unsigned long pa, va;

	if (has_edat2 && !has_edat1)
		report_abort("EDAT2 available, but EDAT1 not available");

	/* Set up DAT with a 1:1 mapping and memory management */
	setup_vm();
	root = (void *)(stctg(1) & PAGE_MASK);

	/*
	 * Get a pgd worth of virtual memory, so we can test things later
	 * without interfering with the test code or the interrupt handler.
	 */
	mem = alloc_vpages_aligned(BIT_ULL(PGD_PAGE_SHIFT), PGD_PAGE_SHIFT);
	assert(mem);
	va = (unsigned long)mem;

	/* Map the first 1GB of real memory */
	for (pa = 0; pa < SZ_1G; pa += PAGE_SIZE, va += PAGE_SIZE)
		install_page(root, pa, (void *)va);

	/*
	 * Move the lowcore to a known non-zero location. This is needed
	 * later to check whether prefixing is working with large pages.
	 */
	assert((unsigned long)&prefix_buf < SZ_2G);
	memcpy(prefix_buf, 0, LC_SIZE);
	set_prefix((uint32_t)(uintptr_t)prefix_buf);
	/* Clear the old copy */
	memset(prefix_buf, 0, LC_SIZE);

	/* m will point to tmp through the new virtual mapping */
	m = VIRT(&tmp);
	/* p is the same as m, but volatile */
	p = (volatile unsigned int *)m;

	return has_edat1 + has_edat2;
}

int main(void)
{
	unsigned int edat;

	report_prefix_push("edat");
	edat = setup();

	test_dat();

	if (edat)
		test_edat1();
	else
		report_skip("EDAT not available");

	if (edat >= 2)
		test_edat2();
	else
		report_skip("EDAT2 not available");

	report_prefix_pop();
	return report_summary();
}
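
/*
 * Usage note (an assumption about the surrounding kvm-unit-tests tree,
 * not part of the test itself): the test is normally launched through
 * the architecture runner script, e.g. "./s390x-run s390x/edat.elf";
 * the exact script and path may differ per setup. Each report() call
 * prints one result line carrying the pushed prefixes, e.g.
 * "PASS: edat: edat1: pmd, ro".
 */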