/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Host Ultravisor Call tests
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */

#include <libcflat.h>
#include <hardware.h>
#include <alloc.h>
#include <vmalloc.h>
#include <sclp.h>
#include <smp.h>
#include <uv.h>
#include <snippet.h>
#include <mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/interrupt.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include <asm-generic/barrier.h>

static struct uv_cb_qui uvcb_qui;
static struct uv_cb_init uvcb_init;
static struct uv_cb_cgc uvcb_cgc;
static struct uv_cb_csc uvcb_csc;

extern int diag308_load_reset(u64 code);

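/*
 * Describes a UV command for the tests below: its name for reporting,
 * the command code, the expected UVCB length, and the corresponding
 * installed-command bit reported by the query UVC.
 */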
struct cmd_list {
	const char *name;
	uint16_t cmd;
	uint16_t len;
	int call_bit;
};

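/* Endless loop to park a secondary CPU; see the 0x102 check in test_init() */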
static void cpu_loop(void)
{
	for (;;) {}
}

/*
 * Checks if a memory area is protected as secure memory.
 * Will return true if all pages are protected, false otherwise.
 */
static bool access_check_3d(uint8_t *access_ptr, uint64_t len)
{
	assert(!(len & ~PAGE_MASK));
	assert(!((uint64_t)access_ptr & ~PAGE_MASK));

	while (len) {
		expect_pgm_int();
		READ_ONCE(*access_ptr);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;
		expect_pgm_int();
		WRITE_ONCE(*access_ptr, 42);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;

		access_ptr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	return true;
}

static struct cmd_list cmds[] = {
	{ "init", UVC_CMD_INIT_UV, sizeof(struct uv_cb_init), BIT_UVC_CMD_INIT_UV },
	{ "create conf", UVC_CMD_CREATE_SEC_CONF, sizeof(struct uv_cb_cgc), BIT_UVC_CMD_CREATE_SEC_CONF },
	{ "destroy conf", UVC_CMD_DESTROY_SEC_CONF, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CONF },
	{ "create cpu", UVC_CMD_CREATE_SEC_CPU, sizeof(struct uv_cb_csc), BIT_UVC_CMD_CREATE_SEC_CPU },
	{ "destroy cpu", UVC_CMD_DESTROY_SEC_CPU, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CPU },
	{ "conv to", UVC_CMD_CONV_TO_SEC_STOR, sizeof(struct uv_cb_cts), BIT_UVC_CMD_CONV_TO_SEC_STOR },
	{ "conv from", UVC_CMD_CONV_FROM_SEC_STOR, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_CONV_FROM_SEC_STOR },
	{ "set sec conf", UVC_CMD_SET_SEC_CONF_PARAMS, sizeof(struct uv_cb_ssc), BIT_UVC_CMD_SET_SEC_PARMS },
	{ "unpack", UVC_CMD_UNPACK_IMG, sizeof(struct uv_cb_unp), BIT_UVC_CMD_UNPACK_IMG },
	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_VERIFY_IMG },
	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET },
	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET_INITIAL },
	{ "conf clear reset", UVC_CMD_PREPARE_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_RESET },
	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET },
	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state), BIT_UVC_CMD_CPU_SET_STATE },
	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_PIN_PAGE_SHARED },
	{ "unpin shared", UVC_CMD_UNPIN_PAGE_SHARED, sizeof(struct uv_cb_cts), BIT_UVC_CMD_UNPIN_PAGE_SHARED },
	{ NULL, 0, 0 },
};

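/*
 * The UVC instruction is normally coded with zeros in its last two
 * fields. Setting them to non-zero values (here 4 and 2) is expected
 * to result in a specification exception.
 */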
static void test_i3(void)
{
	struct uv_cb_header uvcb = {
		.cmd = UVC_CMD_INIT_UV,
		.len = sizeof(struct uv_cb_init),
	};
	unsigned long r1 = 0;
	int cc;

	report_prefix_push("i3");
	expect_pgm_int();
	asm volatile(
		"0:	.insn rrf,0xB9A40000,%[r1],%[r2],4,2\n"
		"		ipm	%[cc]\n"
		"		srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (&uvcb)
		: "memory", "cc");
	check_pgm_int_code(PGM_INT_CODE_SPECIFICATION);
	report_prefix_pop();
}

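/*
 * UV calls are privileged; issuing any of them from problem state
 * must result in a privileged-operation exception.
 */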
static void test_priv(void)
{
	struct uv_cb_header uvcb = {};
	uint16_t pgm;
	int i;

	report_prefix_push("privileged");
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		enter_pstate();
		uv_call_once(0, (uint64_t)&uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PRIVILEGED_OPERATION, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

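/*
 * Before the init UVC has been executed, every other command must be
 * rejected with the invalid-state return code.
 */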
static void test_uv_uninitialized(void)
{
	struct uv_cb_header uvcb = {};
	int i;

	report_prefix_push("uninitialized");

	for (i = 0; cmds[i].name; i++) {
		if (cmds[i].cmd == UVC_CMD_INIT_UV)
			continue;
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		uv_call_once(0, (uint64_t)&uvcb);
		report(uvcb.rc == UVC_RC_INV_STATE, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

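/*
 * UVCBs that are located on an invalid page, or that cross into one,
 * must not be processed; a page-translation exception is expected
 * instead.
 */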
static void test_access(void)
{
	struct uv_cb_header *uvcb;
	void *pages = alloc_pages(1);
	uint16_t pgm;
	int i;

	/* Put the UVCB on the second page, which we will protect later */
	uvcb = pages + PAGE_SIZE;

	report_prefix_push("access");

	/*
	 * If debug is enabled, info from the UV header is printed,
	 * which would lead to a second exception and a test abort.
	 */
	if (UVC_ERR_DEBUG) {
		report_skip("Debug doesn't work with access tests");
		goto out;
	}

	report_prefix_push("non-crossing");
	protect_page(uvcb, PAGE_ENTRY_I);
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	report_prefix_push("crossing");
	/*
	 * Put the header into the readable first page; everything after
	 * the header will be on the second, invalid page.
	 */
	uvcb -= 1;
	for (i = 0; cmds[i].name; i++) {
		uvcb->cmd = cmds[i].cmd;
		uvcb->len = cmds[i].len;

		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	uvcb += 1;
	unprotect_page(uvcb, PAGE_ENTRY_I);

out:
	free_pages(pages);
	report_prefix_pop();
}

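/*
 * Destroy the secure configuration created by test_config_create(),
 * probing the invalid-length and invalid-handle error cases first.
 */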
static void test_config_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF,
		.header.len = sizeof(uvcb),
		.handle = uvcb_cgc.guest_handle,
	};

	report_prefix_push("dsc");
	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.handle -= 1;

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");
	report_prefix_pop();
}

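/*
 * Destroy the secure cpu created by test_cpu_create(). The
 * invalid-handle case is fenced on z15 machines, where it does not
 * behave as expected.
 */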
static void test_cpu_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_DESTROY_SEC_CPU,
		.handle = uvcb_csc.cpu_handle,
	};

	report_prefix_push("dcpu");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	if (!machine_is_z15()) {
		uvcb.handle += 1;
		rc = uv_call(0, (uint64_t)&uvcb);
		report(rc == 1 && uvcb.header.rc == UVC_RC_INV_CHANDLE, "invalid handle");
		uvcb.handle -= 1;
	}

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");

	report_prefix_pop();
}

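/*
 * Exercise the set-secure-configuration-parameters UVC: invalid
 * header length, invalid handle, and an SE header that is fully or
 * partially located on an inaccessible page.
 */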
static void test_set_se_header(void)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.guest_handle = uvcb_cgc.guest_handle,
		.sec_header_origin = 0,
		.sec_header_len = 0x1000,
	};
	void *pages = alloc_pages(1);
	void *inv;
	int rc;

	report_prefix_push("sscp");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.guest_handle -= 1;

	inv = pages + PAGE_SIZE;
	uvcb.sec_header_origin = (uint64_t)inv;
	protect_page(inv, PAGE_ENTRY_I);
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception");

	/*
	 * Shift the pointer so the first few doublewords are accessible,
	 * but everything after them is on the invalid page.
	 */
	uvcb.sec_header_origin -= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception crossing");
	unprotect_page(inv, PAGE_ENTRY_I);

	free_pages(pages);
	report_prefix_pop();
}

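/*
 * Create a secure cpu for the secure configuration, walking through
 * the error conditions (invalid length, handle, cpu number, and
 * storage placement) before the successful creation.
 */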
static void test_cpu_create(void)
{
	int rc;
	unsigned long tmp;

	report_prefix_push("csc");
	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	uvcb_csc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_csc.cpu_handle, "hdr invalid length");
	uvcb_csc.header.len += 8;

	uvcb_csc.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_GHANDLE && rc == 1,
	       "invalid guest handle");
	uvcb_csc.guest_handle -= 1;

	uvcb_csc.num = uvcb_qui.max_guest_cpus + 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x103 && rc == 1,
	       "invalid cpu #");
	uvcb_csc.num = 0;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x105 && rc == 1,
	       "cpu stor inaccessible");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = 0;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x106 && rc == 1,
	       "cpu stor in lowcore");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.state_origin;
	uvcb_csc.state_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x107 && rc == 1,
	       "SIE SD inaccessible");
	uvcb_csc.state_origin = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 0 && uvcb_csc.header.rc == UVC_RC_EXECUTED &&
	       uvcb_csc.cpu_handle, "success");

	rc = access_check_3d((uint8_t *)uvcb_csc.stor_origin,
			     uvcb_qui.cpu_stor_len);
	report(rc, "Storage protection");

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 1 && uvcb_csc.header.rc == 0x104, "already defined");
	uvcb_csc.stor_origin = tmp;
	report_prefix_pop();
}

/*
 * If the first bit of the rc is set, we need to destroy the
 * configuration before testing other create config errors.
 */
static void cgc_destroy_if_needed(struct uv_cb_cgc *uvcb)
{
	uint16_t rc, rrc;

	if (uvcb->header.rc != UVC_RC_EXECUTED &&
	    !(uvcb->header.rc & UVC_RC_DSTR_NEEDED_FLG))
		return;

	assert(uvcb->guest_handle);
	assert(!uv_cmd_nodata(uvcb->guest_handle, UVC_CMD_DESTROY_SEC_CONF,
			      &rc, &rrc));

	/* We need to zero it for the next test */
	uvcb->guest_handle = 0;
}

static bool cgc_check_data(struct uv_cb_cgc *uvcb, uint16_t rc_expected)
{
	/* This function purely checks for error rcs */
	if (uvcb->header.rc == UVC_RC_EXECUTED)
		return false;

	/*
	 * We should only receive a handle when the rc is 1 or the
	 * first bit is set.
	 */
	if (!(uvcb->header.rc & UVC_RC_DSTR_NEEDED_FLG) && uvcb->guest_handle)
		report_abort("Received a handle when we didn't expect one");

	return (uvcb->header.rc & ~UVC_RC_DSTR_NEEDED_FLG) == rc_expected;
}

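/*
 * Create a secure configuration, walking through the error conditions
 * (header length, guest storage placement, ASCE requirements, storage
 * origins, SCA placement) before and after the successful creation.
 */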
static void test_config_create(void)
{
	int rc;
	unsigned long vsize, tmp;
	static struct uv_cb_cgc uvcb;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);
	report_prefix_push("cgc");

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	uvcb_cgc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_cgc.guest_handle, "hdr invalid length");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.header.len += 8;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr + (1UL << 20) * 2 + 1;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x101) && rc == 1,
	       "MSO > max guest addr");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr - (1UL << 20);
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x102) && rc == 1,
	       "MSO + MSL > max guest addr");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_asce &= ~ASCE_P;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x105) && rc == 1,
	       "ASCE private bit missing");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_asce |= ASCE_P;

	uvcb_cgc.guest_asce |= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x105) && rc == 1,
	       "ASCE bit 58 set");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_asce &= ~0x20;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = get_max_ram_size() + 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x108) && rc == 1,
	       "base storage origin > available memory");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.conf_base_stor_origin = tmp;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = 0x1000;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x109) && rc == 1,
	       "base storage origin contains lowcore %x", uvcb_cgc.header.rc);
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.conf_base_stor_origin = tmp;

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x10c) && rc == 1,
	       "sca == 0");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_sca = tmp;

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = get_max_ram_size() + PAGE_SIZE * 4;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x10d) && rc == 1,
	       "sca inaccessible");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_sca = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(rc == 0 && uvcb_cgc.header.rc == UVC_RC_EXECUTED, "successful");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_base_stor_origin,
			     uvcb_qui.conf_base_phys_stor_len);
	report(rc, "Base storage protection");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_var_stor_origin, vsize);
	report(rc, "Variable storage protection");

	uvcb_cgc.header.rc = 0;
	uvcb_cgc.header.rrc = 0;
	tmp = uvcb_cgc.guest_handle;
	uvcb_cgc.guest_handle = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc >= 0x100 && rc == 1, "reuse uvcb");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_handle = tmp;

	/* Copy over most data from uvcb_cgc, so we have the ASCE that was used. */
	memcpy(&uvcb, &uvcb_cgc, sizeof(uvcb));

	/* Reset the header and handle */
	uvcb.header.rc = 0;
	uvcb.header.rrc = 0;
	uvcb.guest_handle = 0;

	/* Use new storage areas. */
	uvcb.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);

	rc = uv_call(0, (uint64_t)&uvcb);
	report(uvcb.header.rc >= 0x104 && rc == 1 && !uvcb.guest_handle,
	       "reuse ASCE");
	cgc_destroy_if_needed(&uvcb);
	free((void *)uvcb.conf_base_stor_origin);
	free((void *)uvcb.conf_var_stor_origin);

	/* Missing: 106, 10a, 10b */
	report_prefix_pop();
}

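/*
 * Initialize the Ultravisor with the donated storage it asked for via
 * the query UVC, after probing the invalid-length, placement, and
 * running-cpu error conditions.
 */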
static void test_init(void)
{
	int rc;
	uint64_t tmp;

	/*
	 * Donated storage needs to be over 2GB; AREA_NORMAL ensures
	 * that on s390x.
	 */
	tmp = (uint64_t)memalign_pages_flags(SZ_1M, uvcb_qui.uv_base_stor_len, AREA_NORMAL);

	uvcb_init.header.len = sizeof(uvcb_init);
	uvcb_init.header.cmd = UVC_CMD_INIT_UV;
	uvcb_init.stor_origin = tmp;
	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;

	report_prefix_push("init");
	uvcb_init.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb_init.header.len += 8;

	uvcb_init.stor_len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x103,
	       "storage invalid length");
	uvcb_init.stor_len += 8;

	/* Storage origin is 1MB aligned, the length is 4KB aligned */
	uvcb_init.stor_origin = get_max_ram_size();
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && (uvcb_init.header.rc == 0x104 || uvcb_init.header.rc == 0x105),
	       "storage origin invalid");
	uvcb_init.stor_origin = tmp;

	if (uvcb_init.stor_len >= HPAGE_SIZE) {
		uvcb_init.stor_origin = get_max_ram_size() - HPAGE_SIZE;
		rc = uv_call(0, (uint64_t)&uvcb_init);
		report(rc == 1 && uvcb_init.header.rc == 0x105,
		       "storage + length invalid");
		uvcb_init.stor_origin = tmp;
	} else {
		report_skip("storage + length invalid, stor_len < HPAGE_SIZE");
	}

	uvcb_init.stor_origin = 1UL << 30;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x108,
	       "storage below 2GB");
	uvcb_init.stor_origin = tmp;

	if (smp_query_num_cpus() > 1) {
		smp_cpu_setup(1, PSW_WITH_CUR_MASK(cpu_loop));
		rc = uv_call(0, (uint64_t)&uvcb_init);
		report(rc == 1 && uvcb_init.header.rc == 0x102,
		       "too many running cpus");
		smp_cpu_stop(1);
	} else {
		report_skip("Not enough cpus for 0x102 test");
	}

	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 0 && uvcb_init.header.rc == UVC_RC_EXECUTED, "successful");

	tmp = uvcb_init.stor_origin;
	uvcb_init.stor_origin = (uint64_t)memalign_pages_flags(HPAGE_SIZE, uvcb_qui.uv_base_stor_len, AREA_NORMAL);
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x101, "double init");
	free((void *)uvcb_init.stor_origin);
	uvcb_init.stor_origin = tmp;

	report_prefix_pop();
}

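/*
 * Query the UV for its characteristics, which the later tests rely
 * on. Newer UV implementations may have more query data than our UVCB
 * can hold, in which case cc 1 with rc 0x100 still counts as a
 * successful query.
 */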
static void test_query(void)
{
	int i = 0, cc;

	uvcb_qui.header.cmd = UVC_CMD_QUI;
	uvcb_qui.header.len = sizeof(uvcb_qui);

	report_prefix_push("query");
	uvcb_qui.header.len = 0xa0;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == UVC_RC_INV_LEN, "length");

	uvcb_qui.header.len = 0xa8;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == 0x100, "insf length");

	uvcb_qui.header.len = sizeof(uvcb_qui);
	cc = uv_call(0, (uint64_t)&uvcb_qui);
	report((!cc && uvcb_qui.header.rc == UVC_RC_EXECUTED) ||
	       (cc == 1 && uvcb_qui.header.rc == 0x100),
	       "successful query");

	for (i = 0; cmds[i].name; i++)
		report(uv_query_test_call(cmds[i].call_bit), "%s", cmds[i].name);

	report_prefix_pop();
}

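/*
 * Commands the UV host must reject with an invalid-command rc: an
 * unknown command code and commands that are only valid for secure
 * guests.
 */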
static struct cmd_list invalid_cmds[] = {
	{ "bogus", 0x4242, sizeof(struct uv_cb_header), -1},
	{ "share", UVC_CMD_SET_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_SET_SHARED_ACCESS },
	{ "unshare", UVC_CMD_REMOVE_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_REMOVE_SHARED_ACCESS },
	{ "attest", UVC_CMD_ATTESTATION, sizeof(struct uv_cb_attest), BIT_UVC_CMD_ATTESTATION },
	{ NULL, 0, 0 },
};

static void test_invalid(void)
{
	struct uv_cb_header hdr = {};
	int i, cc;

	report_prefix_push("invalid");
	for (i = 0; invalid_cmds[i].name; i++) {
		hdr.cmd = invalid_cmds[i].cmd;
		hdr.len = invalid_cmds[i].len;
		cc = uv_call(0, (uint64_t)&hdr);
		report(cc == 1 && hdr.rc == UVC_RC_INV_CMD &&
		       (invalid_cmds[i].call_bit == -1 || !uv_query_test_call(invalid_cmds[i].call_bit)),
		       "%s", invalid_cmds[i].name);
	}
	report_prefix_pop();
}

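/*
 * Create a secure configuration and a secure cpu so test_clear() has
 * donated memory whose clearing it can verify after the reset.
 */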
static void setup_test_clear(void)
{
	unsigned long vsize;
	int rc;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	assert(rc == 0);

	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	assert(rc == 0);
}

static void test_clear(void)
{
	uint64_t *tmp;

	report_prefix_push("load normal reset");

	/*
	 * Set up a config and a cpu so we can check whether a diag308
	 * reset clears the donated memory and makes the pages
	 * non-secure again.
	 */
	setup_test_clear();

	diag308_load_reset(1);
	sclp_console_setup();

	tmp = (void *)uvcb_init.stor_origin;
	report(!*tmp, "uv init donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_base_stor_origin;
	report(!*tmp, "config base donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_var_stor_origin;
	report(!*tmp, "config variable donated memory cleared");

	tmp = (void *)uvcb_csc.stor_origin;
	report(!*tmp, "cpu donated memory cleared after reset 1");

	/* Check if uninitialized after reset */
	test_uv_uninitialized();

	report_prefix_pop();
}

static void setup_vmem(void)
{
	uint64_t asce;

	setup_mmu(get_max_ram_size(), NULL);
	/*
	 * setup_mmu() will enable DAT and set the primary address
	 * space, but we also need a valid home space, since UV calls
	 * take home-space virtual addresses.
	 *
	 * Hence we just copy the primary ASCE into the home space.
	 */
	asce = stctg(1);
	lctlg(13, asce);
}

int main(void)
{
	bool has_uvc = test_facility(158);

	report_prefix_push("uvc");
	if (!has_uvc) {
		report_skip("Ultravisor call facility is not available");
		goto done;
	}
	if (!uv_os_is_host()) {
		report_skip("This test needs to be run in a UV host environment");
		goto done;
	}

	test_i3();
	test_priv();
	test_invalid();
	test_uv_uninitialized();
	test_query();

	if (get_ram_size() < SNIPPET_PV_MIN_MEM_SIZE) {
		report_skip("Not enough memory. This test needs about %ld MB of memory",
			    SNIPPET_PV_MIN_MEM_SIZE / SZ_1M);
		goto done;
	}

	test_init();

	setup_vmem();
	test_access();

	test_config_create();
	test_cpu_create();
	test_set_se_header();
	test_cpu_destroy();
	test_config_destroy();
	test_clear();

done:
	return report_summary();
}