xref: /kvm-unit-tests/s390x/edat.c (revision 1f08a91a41402b0e032ecce8ed1b5952cbfca0ea)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * EDAT test.
4  *
5  * Copyright (c) 2021 IBM Corp
6  *
7  * Authors:
8  *	Claudio Imbrenda <imbrenda@linux.ibm.com>
9  */
10 #include <libcflat.h>
11 #include <vmalloc.h>
12 #include <asm/facility.h>
13 #include <asm/interrupt.h>
14 #include <mmu.h>
15 #include <asm/pgtable.h>
16 #include <asm-generic/barrier.h>
17 
18 #define PGD_PAGE_SHIFT (REGION1_SHIFT - PAGE_SHIFT)
19 
20 #define VIRT(x)	((void *)((unsigned long)(x) + (unsigned long)mem))
21 
22 static uint8_t prefix_buf[LC_SIZE] __attribute__((aligned(LC_SIZE)));
23 static unsigned int tmp[1024] __attribute__((aligned(PAGE_SIZE)));
24 static void *root, *mem, *m;
25 volatile unsigned int *p;
26 
27 /*
28  * Check if the exception is consistent with DAT protection and has the correct
29  * address and primary address space.
30  */
check_pgm_prot(void * ptr)31 static bool check_pgm_prot(void *ptr)
32 {
33 	union teid teid;
34 
35 	if (lowcore.pgm_int_code != PGM_INT_CODE_PROTECTION)
36 		return false;
37 
38 	teid.val = lowcore.trans_exc_id;
39 	switch (get_supp_on_prot_facility()) {
40 	case SOP_NONE:
41 	case SOP_BASIC:
42 		assert(false); /* let's ignore ancient/irrelevant machines */
43 	case SOP_ENHANCED_1:
44 		if (!teid.sop_teid_predictable) /* implies key or low addr */
45 			return false;
46 		break;
47 	case SOP_ENHANCED_2:
48 		if (teid_esop2_prot_code(teid) != PROT_DAT)
49 			return false;
50 	}
51 	return (!teid.sop_acc_list && !teid.asce_id &&
52 		(teid.addr == ((unsigned long)ptr >> PAGE_SHIFT)));
53 }
54 
/*
 * Test DAT behavior with EDAT disabled: page-level (pte) and segment-level
 * (pmd) protection must be honored, while the FC (large page) bits and the
 * region-level (pud/p4d/pgd) protection bits must be ignored.
 */
static void test_dat(void)
{
	report_prefix_push("edat off");
	/* disable EDAT */
	ctl_clear_bit(0, CTL0_EDAT);

	/* Check some basics: plain read/write through the mapping works */
	p[0] = 42;
	report(p[0] == 42, "pte, r/w");
	p[0] = 0;

	/* Write protect the page and try to write, expect a fault */
	protect_page(m, PAGE_ENTRY_P);
	expect_pgm_int();
	p[0] = 42;
	unprotect_page(m, PAGE_ENTRY_P);
	/* the write must not have gone through, and the fault must match */
	report(!p[0] && check_pgm_prot(m), "pte, ro");

	/*
	 * The FC bit (for large pages) should be ignored because EDAT is
	 * off. We set a value and then we try to read it back again after
	 * setting the FC bit. This way we can check if large pages were
	 * erroneously enabled despite EDAT being off.
	 */
	p[0] = 42;
	protect_dat_entry(m, SEGMENT_ENTRY_FC, pgtable_level_pmd);
	report(p[0] == 42, "pmd, fc=1, r/w");
	unprotect_dat_entry(m, SEGMENT_ENTRY_FC, pgtable_level_pmd);
	p[0] = 0;

	/*
	 * Segment protection should work even with EDAT off, try to write
	 * anyway and expect a fault
	 */
	protect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);
	expect_pgm_int();
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pmd, ro");
	unprotect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);

	/* The FC bit should be ignored because EDAT is off, like above */
	p[0] = 42;
	protect_dat_entry(m, REGION3_ENTRY_FC, pgtable_level_pud);
	report(p[0] == 42, "pud, fc=1, r/w");
	unprotect_dat_entry(m, REGION3_ENTRY_FC, pgtable_level_pud);
	p[0] = 0;

	/*
	 * Region1/2/3 protection should not work, because EDAT is off.
	 * Protect the various region1/2/3 entries and write, expect the
	 * write to be successful.
	 */
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	p[0] = 42;
	report(p[0] == 42, "pud, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	p[0] = 0;

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	p[0] = 42;
	report(p[0] == 42, "p4d, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	p[0] = 0;

	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	p[0] = 42;
	report(p[0] == 42, "pgd, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	p[0] = 0;

	report_prefix_pop();
}
127 
/*
 * Test DAT behavior with EDAT1 enabled: protection bits must now be honored
 * at every translation level, large (1M) pages must work, and prefixing must
 * not be applied to addresses mapped through large pages.
 */
static void test_edat1(void)
{
	report_prefix_push("edat1");
	/* Enable EDAT */
	ctl_set_bit(0, CTL0_EDAT);
	p[0] = 0;

	/*
	 * Segment protection should work normally, try to write and expect
	 * a fault.
	 */
	expect_pgm_int();
	protect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pmd, ro");
	unprotect_dat_entry(m, SEGMENT_ENTRY_P, pgtable_level_pmd);

	/*
	 * Region1/2/3 protection should work now, because EDAT is on. Try
	 * to write anyway and expect a fault.
	 */
	expect_pgm_int();
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pud, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pud);

	expect_pgm_int();
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "p4d, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_p4d);

	expect_pgm_int();
	protect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);
	p[0] = 42;
	report(!p[0] && check_pgm_prot(m), "pgd, ro");
	unprotect_dat_entry(m, REGION_ENTRY_P, pgtable_level_pgd);

	/* Large pages should work */
	p[0] = 42;
	install_large_page(root, 0, mem);
	report(p[0] == 42, "pmd, large");

	/*
	 * Prefixing should not work with large pages. Since the lower
	 * addresses are mapped with small pages, which are subject to
	 * prefixing, and the pages mapped with large pages are not subject
	 * to prefixing, this is the resulting scenario:
	 *
	 * virtual 0 = real 0 -> absolute prefix_buf
	 * virtual prefix_buf = real prefix_buf -> absolute 0
	 * VIRT(0) -> absolute 0
	 * VIRT(prefix_buf) -> absolute prefix_buf
	 *
	 * The testcase checks if the memory at virtual 0 has the same
	 * content as the memory at VIRT(prefix_buf) and the memory at
	 * VIRT(0) has the same content as the memory at virtual prefix_buf.
	 * If prefixing is erroneously applied for large pages, the testcase
	 * will therefore fail.
	 */
	report(!memcmp(0, VIRT(prefix_buf), LC_SIZE) &&
		!memcmp(prefix_buf, VIRT(0), LC_SIZE),
		"pmd, large, prefixing");

	report_prefix_pop();
}
195 
test_edat2(void)196 static void test_edat2(void)
197 {
198 	uint64_t mem_end, i;
199 
200 	report_prefix_push("edat2");
201 	p[0] = 42;
202 
203 	/* Huge pages should work */
204 	install_huge_page(root, 0, mem);
205 	report(p[0] == 42, "pud, huge");
206 
207 	/* Prefixing should not work with huge pages, just like large pages */
208 	report(!memcmp(0, VIRT(prefix_buf), LC_SIZE) &&
209 		!memcmp(prefix_buf, VIRT(0), LC_SIZE),
210 		"pud, large, prefixing");
211 
212 	mem_end = get_ram_size();
213 	if (mem_end >= BIT_ULL(REGION3_SHIFT)) {
214 		report_skip("pud spanning end of memory");
215 	} else {
216 		for (i = 0; i < mem_end; i += PAGE_SIZE)
217 			READ_ONCE(*(uint64_t *)VIRT(i));
218 		for (i = mem_end; i < BIT_ULL(REGION3_SHIFT); i += PAGE_SIZE) {
219 			expect_pgm_int();
220 			READ_ONCE(*(uint64_t *)VIRT(i));
221 			assert(clear_pgm_int() == PGM_INT_CODE_ADDRESSING);
222 		}
223 		report_pass("pud spanning end of memory");
224 	}
225 
226 	report_prefix_pop();
227 }
228 
setup(void)229 static unsigned int setup(void)
230 {
231 	bool has_edat1 = test_facility(8);
232 	bool has_edat2 = test_facility(78);
233 	unsigned long pa, va;
234 
235 	if (has_edat2 && !has_edat1)
236 		report_abort("EDAT2 available, but EDAT1 not available");
237 
238 	/* Setup DAT 1:1 mapping and memory management */
239 	setup_vm();
240 	root = (void *)(stctg(1) & PAGE_MASK);
241 
242 	/*
243 	 * Get a pgd worth of virtual memory, so we can test things later
244 	 * without interfering with the test code or the interrupt handler
245 	 */
246 	mem = alloc_vpages_aligned(BIT_ULL(PGD_PAGE_SHIFT), PGD_PAGE_SHIFT);
247 	assert(mem);
248 	va = (unsigned long)mem;
249 
250 	/* Map the first 1GB of real memory */
251 	for (pa = 0; pa < SZ_1G; pa += PAGE_SIZE, va += PAGE_SIZE)
252 		install_page(root, pa, (void *)va);
253 
254 	/*
255 	 * Move the lowcore to a known non-zero location. This is needed
256 	 * later to check whether prefixing is working with large pages.
257 	 */
258 	assert((unsigned long)&prefix_buf < SZ_2G);
259 	memcpy(prefix_buf, 0, LC_SIZE);
260 	set_prefix((uint32_t)(uintptr_t)prefix_buf);
261 	/* Clear the old copy */
262 	memset(prefix_buf, 0, LC_SIZE);
263 
264 	/* m will point to tmp through the new virtual mapping */
265 	m = VIRT(&tmp);
266 	/* p is the same as m but volatile */
267 	p = (volatile unsigned int *)m;
268 
269 	return has_edat1 + has_edat2;
270 }
271 
/*
 * Run the plain DAT tests unconditionally, then the EDAT1/EDAT2 tests
 * depending on the facility level reported by setup().
 */
int main(void)
{
	unsigned int edat_level;

	report_prefix_push("edat");
	edat_level = setup();

	test_dat();

	if (!edat_level)
		report_skip("EDAT not available");
	else
		test_edat1();

	if (edat_level < 2)
		report_skip("EDAT2 not available");
	else
		test_edat2();

	report_prefix_pop();
	return report_summary();
}
294