/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Host Ultravisor Call tests
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */

#include <libcflat.h>
#include <hardware.h>
#include <alloc.h>
#include <vmalloc.h>
#include <sclp.h>
#include <smp.h>
#include <uv.h>
#include <snippet.h>
#include <mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/interrupt.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include <asm-generic/barrier.h>

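/* UV control blocks shared between the tests; filled by the query, init and create tests */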
static struct uv_cb_qui uvcb_qui;
static struct uv_cb_init uvcb_init;
static struct uv_cb_cgc uvcb_cgc;
static struct uv_cb_csc uvcb_csc;

extern int diag308_load_reset(u64 code);

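/*
 * One entry per tested UV command: human-readable name, command code,
 * expected UVCB length and the corresponding bit in the query UVC's
 * list of installed calls.
 */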
struct cmd_list {
	const char *name;
	uint16_t cmd;
	uint16_t len;
	int call_bit;
};

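/* Idle loop for the secondary CPU that test_init() brings up */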
static void cpu_loop(void)
{
	for (;;) {}
}

/*
 * Checks if a memory area is protected as secure memory.
 * Returns true if all pages are protected, false otherwise.
 * Each access is expected to trigger a secure storage access
 * exception (program interruption code 0x3d).
 */
static bool access_check_3d(uint8_t *access_ptr, uint64_t len)
{
	assert(!(len & ~PAGE_MASK));
	assert(!((uint64_t)access_ptr & ~PAGE_MASK));

	while (len) {
		expect_pgm_int();
		READ_ONCE(*access_ptr);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;
		expect_pgm_int();
		WRITE_ONCE(*access_ptr, 42);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;

		access_ptr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	return true;
}

static struct cmd_list cmds[] = {
	{ "init", UVC_CMD_INIT_UV, sizeof(struct uv_cb_init), BIT_UVC_CMD_INIT_UV },
	{ "create conf", UVC_CMD_CREATE_SEC_CONF, sizeof(struct uv_cb_cgc), BIT_UVC_CMD_CREATE_SEC_CONF },
	{ "destroy conf", UVC_CMD_DESTROY_SEC_CONF, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CONF },
	{ "create cpu", UVC_CMD_CREATE_SEC_CPU, sizeof(struct uv_cb_csc), BIT_UVC_CMD_CREATE_SEC_CPU },
	{ "destroy cpu", UVC_CMD_DESTROY_SEC_CPU, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CPU },
	{ "conv to", UVC_CMD_CONV_TO_SEC_STOR, sizeof(struct uv_cb_cts), BIT_UVC_CMD_CONV_TO_SEC_STOR },
	{ "conv from", UVC_CMD_CONV_FROM_SEC_STOR, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_CONV_FROM_SEC_STOR },
	{ "set sec conf", UVC_CMD_SET_SEC_CONF_PARAMS, sizeof(struct uv_cb_ssc), BIT_UVC_CMD_SET_SEC_PARMS },
	{ "unpack", UVC_CMD_UNPACK_IMG, sizeof(struct uv_cb_unp), BIT_UVC_CMD_UNPACK_IMG },
	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_VERIFY_IMG },
	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET },
	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET_INITIAL },
	{ "conf clear reset", UVC_CMD_PREPARE_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_RESET },
	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET },
	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state), BIT_UVC_CMD_CPU_SET_STATE },
	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_PIN_PAGE_SHARED },
	{ "unpin shared", UVC_CMD_UNPIN_PAGE_SHARED, sizeof(struct uv_cb_cts), BIT_UVC_CMD_UNPIN_PAGE_SHARED },
	{ NULL, 0, 0 },
};

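/*
 * A UVC with nonzero reserved (i3) instruction fields has to result
 * in a specification exception.
 */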
static void test_i3(void)
{
	struct uv_cb_header uvcb = {
		.cmd = UVC_CMD_INIT_UV,
		.len = sizeof(struct uv_cb_init),
	};
	unsigned long r1 = 0;
	int cc;

	report_prefix_push("i3");
	expect_pgm_int();
	asm volatile(
		"0:	.insn rrf,0xB9A40000,%[r1],%[r2],4,2\n"
		"		ipm	%[cc]\n"
		"		srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (&uvcb)
		: "memory", "cc");
	check_pgm_int_code(PGM_INT_CODE_SPECIFICATION);
	report_prefix_pop();
}

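/*
 * UV calls are privileged; every known command issued from problem
 * state has to end in a privileged-operation exception.
 */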
static void test_priv(void)
{
	struct uv_cb_header uvcb = {};
	uint16_t pgm;
	int i;

	report_prefix_push("privileged");
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		enter_pstate();
		uv_call_once(0, (uint64_t)&uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PRIVILEGED_OPERATION, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

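/*
 * As long as the init UVC has not been executed, all other commands
 * have to be rejected with an "invalid state" return code.
 */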
static void test_uv_uninitialized(void)
{
	struct uv_cb_header uvcb = {};
	int i;

	report_prefix_push("uninitialized");

	for (i = 0; cmds[i].name; i++) {
		if (cmds[i].cmd == UVC_CMD_INIT_UV)
			continue;
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		uv_call_once(0, (uint64_t)&uvcb);
		report(uvcb.rc == UVC_RC_INV_STATE, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

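/*
 * A UVCB that is wholly or partly on an invalid page has to result
 * in a page-translation program exception.
 */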
static void test_access(void)
{
	struct uv_cb_header *uvcb;
	void *pages = alloc_pages(1);
	uint16_t pgm;
	int i;

	/* Put the UVCB on the second page, which we will protect later */
	uvcb = pages + PAGE_SIZE;

	report_prefix_push("access");

	report_prefix_push("non-crossing");
	protect_page(uvcb, PAGE_ENTRY_I);
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	report_prefix_push("crossing");
	/*
	 * Put the header into the readable first page; everything after
	 * the header will be on the second, invalid page.
	 */
	uvcb -= 1;
	for (i = 0; cmds[i].name; i++) {
		uvcb->cmd = cmds[i].cmd;
		uvcb->len = cmds[i].len;

		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	uvcb += 1;
	unprotect_page(uvcb, PAGE_ENTRY_I);

	free_pages(pages);
	report_prefix_pop();
}

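/*
 * Destroy the secure configuration created by test_config_create()
 * and check the length and handle validation of the destroy UVC.
 */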
static void test_config_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF,
		.header.len = sizeof(uvcb),
		.handle = uvcb_cgc.guest_handle,
	};

	report_prefix_push("dsc");
	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.handle -= 1;

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");
	report_prefix_pop();
}

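/*
 * Destroy the secure CPU created by test_cpu_create() and check the
 * length and handle validation of the destroy CPU UVC.
 */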
static void test_cpu_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_DESTROY_SEC_CPU,
		.handle = uvcb_csc.cpu_handle,
	};

	report_prefix_push("dcpu");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	/* The invalid-handle check is skipped on z15 machines */
	if (!machine_is_z15()) {
		uvcb.handle += 1;
		rc = uv_call(0, (uint64_t)&uvcb);
		report(rc == 1 && uvcb.header.rc == UVC_RC_INV_CHANDLE, "invalid handle");
		uvcb.handle -= 1;
	}

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");

	report_prefix_pop();
}

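/*
 * Check the length, handle and SE header access validation of the
 * set secure config parameters UVC.
 */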
static void test_set_se_header(void)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.guest_handle = uvcb_cgc.guest_handle,
		.sec_header_origin = 0,
		.sec_header_len = 0x1000,
	};
	void *pages = alloc_pages(1);
	void *inv;
	int rc;

	report_prefix_push("sscp");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.guest_handle -= 1;

	inv = pages + PAGE_SIZE;
	uvcb.sec_header_origin = (uint64_t)inv;
	protect_page(inv, PAGE_ENTRY_I);
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception");

	/*
	 * Shift the ptr so the first few DWORDs are accessible but
	 * the following are on an invalid page.
	 */
	uvcb.sec_header_origin -= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception crossing");
	unprotect_page(inv, PAGE_ENTRY_I);

	free_pages(pages);
	report_prefix_pop();
}

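/*
 * Walk through the error conditions of the create secure CPU UVC and
 * finally create the CPU that test_cpu_destroy() tears down again.
 */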
static void test_cpu_create(void)
{
	int rc;
	unsigned long tmp;

	report_prefix_push("csc");
	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	uvcb_csc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_csc.cpu_handle, "hdr invalid length");
	uvcb_csc.header.len += 8;

	uvcb_csc.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_GHANDLE && rc == 1,
	       "invalid guest handle");
	uvcb_csc.guest_handle -= 1;

	uvcb_csc.num = uvcb_qui.max_guest_cpus + 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x103 && rc == 1,
	       "invalid cpu #");
	uvcb_csc.num = 0;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x105 && rc == 1,
	       "cpu stor inaccessible");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = 0;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x106 && rc == 1,
	       "cpu stor in lowcore");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.state_origin;
	uvcb_csc.state_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x107 && rc == 1,
	       "SIE SD inaccessible");
	uvcb_csc.state_origin = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 0 && uvcb_csc.header.rc == UVC_RC_EXECUTED &&
	       uvcb_csc.cpu_handle, "success");

	rc = access_check_3d((uint8_t *)uvcb_csc.stor_origin,
			     uvcb_qui.cpu_stor_len);
	report(rc, "Storage protection");

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 1 && uvcb_csc.header.rc == 0x104, "already defined");
	uvcb_csc.stor_origin = tmp;
	report_prefix_pop();
}

/*
 * If the UVC was executed or the destroy-needed flag
 * (UVC_RC_DSTR_NEEDED_FLG) is set in the rc, we need to destroy the
 * configuration before testing other create config errors.
 */
static void cgc_destroy_if_needed(struct uv_cb_cgc *uvcb)
{
	uint16_t rc, rrc;

	if (uvcb->header.rc != UVC_RC_EXECUTED &&
	    !(uvcb->header.rc & UVC_RC_DSTR_NEEDED_FLG))
		return;

	assert(uvcb->guest_handle);
	assert(!uv_cmd_nodata(uvcb->guest_handle, UVC_CMD_DESTROY_SEC_CONF,
			      &rc, &rrc));

	/* We need to zero it for the next test */
	uvcb->guest_handle = 0;
}

static bool cgc_check_data(struct uv_cb_cgc *uvcb, uint16_t rc_expected)
{
	/* This function purely checks for error rcs */
	if (uvcb->header.rc == UVC_RC_EXECUTED)
		return false;

	/*
	 * On an error rc we should only receive a handle if the
	 * destroy-needed flag is set.
	 */
	if (!(uvcb->header.rc & UVC_RC_DSTR_NEEDED_FLG) && uvcb->guest_handle)
		report_abort("Received a handle when we didn't expect one");

	return (uvcb->header.rc & ~UVC_RC_DSTR_NEEDED_FLG) == rc_expected;
}

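/*
 * Walk through the error conditions of the create secure
 * configuration UVC, then create the configuration that the
 * following tests operate on.
 */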
static void test_config_create(void)
{
	int rc;
	unsigned long vsize, tmp;
	static struct uv_cb_cgc uvcb;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);
	report_prefix_push("cgc");

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	uvcb_cgc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_cgc.guest_handle, "hdr invalid length");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.header.len += 8;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr + (1UL << 20) * 2 + 1;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x101) && rc == 1,
	       "MSO > max guest addr");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr - (1UL << 20);
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x102) && rc == 1,
	       "MSO + MSL > max guest addr");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_asce &= ~ASCE_P;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x105) && rc == 1,
	       "ASCE private bit missing");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_asce |= ASCE_P;

	uvcb_cgc.guest_asce |= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x105) && rc == 1,
	       "ASCE bit 58 set");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_asce &= ~0x20;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = get_max_ram_size() + 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x108) && rc == 1,
	       "base storage origin > available memory");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.conf_base_stor_origin = tmp;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = 0x1000;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x109) && rc == 1,
	       "base storage origin contains lowcore %x", uvcb_cgc.header.rc);
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.conf_base_stor_origin = tmp;

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x10c) && rc == 1,
	       "sca == 0");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_sca = tmp;

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = get_max_ram_size() + PAGE_SIZE * 4;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(cgc_check_data(&uvcb_cgc, 0x10d) && rc == 1,
	       "sca inaccessible");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_sca = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(rc == 0 && uvcb_cgc.header.rc == UVC_RC_EXECUTED, "successful");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_base_stor_origin,
			     uvcb_qui.conf_base_phys_stor_len);
	report(rc, "Base storage protection");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_var_stor_origin, vsize);
	report(rc, "Variable storage protection");

	uvcb_cgc.header.rc = 0;
	uvcb_cgc.header.rrc = 0;
	tmp = uvcb_cgc.guest_handle;
	uvcb_cgc.guest_handle = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc >= 0x100 && rc == 1, "reuse uvcb");
	cgc_destroy_if_needed(&uvcb_cgc);
	uvcb_cgc.guest_handle = tmp;

	/* Copy over most data from uvcb_cgc, so we have the ASCE that was used. */
	memcpy(&uvcb, &uvcb_cgc, sizeof(uvcb));

	/* Reset the header and handle */
	uvcb.header.rc = 0;
	uvcb.header.rrc = 0;
	uvcb.guest_handle = 0;

	/* Use new storage areas. */
	uvcb.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);

	rc = uv_call(0, (uint64_t)&uvcb);
	report(uvcb.header.rc >= 0x104 && rc == 1 && !uvcb.guest_handle,
	       "reuse ASCE");
	cgc_destroy_if_needed(&uvcb);
	free((void *)uvcb.conf_base_stor_origin);
	free((void *)uvcb.conf_var_stor_origin);

	/* Missing: 106, 10a, 10b */
	report_prefix_pop();
}

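/*
 * Donate storage to the Ultravisor and initialize it, checking the
 * length, alignment, address and running-CPU error conditions along
 * the way.
 */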
static void test_init(void)
{
	int rc;
	uint64_t tmp;

	/*
	 * Donated storage needs to be over 2GB, AREA_NORMAL does that
	 * on s390x.
	 */
	tmp = (uint64_t)memalign_pages_flags(SZ_1M, uvcb_qui.uv_base_stor_len, AREA_NORMAL);

	uvcb_init.header.len = sizeof(uvcb_init);
	uvcb_init.header.cmd = UVC_CMD_INIT_UV;
	uvcb_init.stor_origin = tmp;
	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;

	report_prefix_push("init");
	uvcb_init.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb_init.header.len += 8;

	uvcb_init.stor_len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x103,
	       "storage invalid length");
	uvcb_init.stor_len += 8;

	/* Storage origin is 1MB aligned, the length is 4KB aligned */
	uvcb_init.stor_origin = get_max_ram_size();
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && (uvcb_init.header.rc == 0x104 || uvcb_init.header.rc == 0x105),
	       "storage origin invalid");
	uvcb_init.stor_origin = tmp;

	if (uvcb_init.stor_len >= HPAGE_SIZE) {
		uvcb_init.stor_origin = get_max_ram_size() - HPAGE_SIZE;
		rc = uv_call(0, (uint64_t)&uvcb_init);
		report(rc == 1 && uvcb_init.header.rc == 0x105,
		       "storage + length invalid");
		uvcb_init.stor_origin = tmp;
	} else {
		report_skip("storage + length invalid, stor_len < HPAGE_SIZE");
	}

	uvcb_init.stor_origin = 1UL << 30;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x108,
	       "storage below 2GB");
	uvcb_init.stor_origin = tmp;

	if (smp_query_num_cpus() > 1) {
		smp_cpu_setup(1, PSW_WITH_CUR_MASK(cpu_loop));
		rc = uv_call(0, (uint64_t)&uvcb_init);
		report(rc == 1 && uvcb_init.header.rc == 0x102,
		       "too many running cpus");
		smp_cpu_stop(1);
	} else {
		report_skip("Not enough cpus for 0x102 test");
	}

	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 0 && uvcb_init.header.rc == UVC_RC_EXECUTED, "successful");

	tmp = uvcb_init.stor_origin;
	uvcb_init.stor_origin = (uint64_t)memalign_pages_flags(HPAGE_SIZE, uvcb_qui.uv_base_stor_len, AREA_NORMAL);
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x101, "double init");
	free((void *)uvcb_init.stor_origin);
	uvcb_init.stor_origin = tmp;

	report_prefix_pop();
}

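/*
 * Query the UV for its supported calls and storage requirements; the
 * data returned in uvcb_qui drives the later tests.
 */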
static void test_query(void)
{
	int i = 0, cc;

	uvcb_qui.header.cmd = UVC_CMD_QUI;
	uvcb_qui.header.len = sizeof(uvcb_qui);

	report_prefix_push("query");
	uvcb_qui.header.len = 0xa0;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == UVC_RC_INV_LEN, "length");

	uvcb_qui.header.len = 0xa8;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == 0x100, "insf length");

	uvcb_qui.header.len = sizeof(uvcb_qui);
	cc = uv_call(0, (uint64_t)&uvcb_qui);
	report((!cc && uvcb_qui.header.rc == UVC_RC_EXECUTED) ||
	       (cc == 1 && uvcb_qui.header.rc == 0x100),
		"successful query");

	for (i = 0; cmds[i].name; i++)
		report(uv_query_test_call(cmds[i].call_bit), "%s", cmds[i].name);

	report_prefix_pop();
}

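/* A bogus command code plus commands that are not valid from a UV host */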
static struct cmd_list invalid_cmds[] = {
	{ "bogus", 0x4242, sizeof(struct uv_cb_header), -1},
	{ "share", UVC_CMD_SET_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_SET_SHARED_ACCESS },
	{ "unshare", UVC_CMD_REMOVE_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_REMOVE_SHARED_ACCESS },
	{ "attest", UVC_CMD_ATTESTATION, sizeof(struct uv_cb_attest), BIT_UVC_CMD_ATTESTATION },
	{ NULL, 0, 0 },
};

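/*
 * The commands above have to be rejected as invalid and must not be
 * advertised by the query UVC.
 */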
static void test_invalid(void)
{
	struct uv_cb_header hdr = {};
	int i, cc;

	report_prefix_push("invalid");
	for (i = 0; invalid_cmds[i].name; i++) {
		hdr.cmd = invalid_cmds[i].cmd;
		hdr.len = invalid_cmds[i].len;
		cc = uv_call(0, (uint64_t)&hdr);
		report(cc == 1 && hdr.rc == UVC_RC_INV_CMD &&
		       (invalid_cmds[i].call_bit == -1 || !uv_query_test_call(invalid_cmds[i].call_bit)),
		       "%s", invalid_cmds[i].name);
	}
	report_prefix_pop();
}

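/*
 * Create a fresh configuration and CPU so that test_clear() can
 * verify that a diag308 reset clears the donated memory and removes
 * the protection again.
 */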
static void setup_test_clear(void)
{
	unsigned long vsize;
	int rc;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	assert(rc == 0);

	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	assert(rc == 0);
}

static void test_clear(void)
{
	uint64_t *tmp;

	report_prefix_push("load normal reset");

	/*
	 * Setup a config and a cpu so we can check if a diag308 reset
	 * clears the donated memory and makes the pages unsecure.
	 */
	setup_test_clear();

	diag308_load_reset(1);
	sclp_console_setup();

	tmp = (void *)uvcb_init.stor_origin;
	report(!*tmp, "uv init donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_base_stor_origin;
	report(!*tmp, "config base donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_var_stor_origin;
	report(!*tmp, "config variable donated memory cleared");

	tmp = (void *)uvcb_csc.stor_origin;
	report(!*tmp, "cpu donated memory cleared after reset 1");

	/* Check if uninitialized after reset */
	test_uv_uninitialized();

	report_prefix_pop();
}

static void setup_vmem(void)
{
	uint64_t asce;

	setup_mmu(get_max_ram_size(), NULL);
	/*
	 * setup_mmu() will enable DAT and set the primary address
	 * space but we need to have a valid home space since UV calls
	 * take home space virtual addresses.
	 *
	 * Hence we just copy the primary ASCE into the home space.
	 */
	asce = stctg(1);
	lctlg(13, asce);
}

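/*
 * The query and pre-init checks run first; everything after
 * test_init() requires an initialized UV and, from test_access() on,
 * virtual memory.
 */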
int main(void)
{
	bool has_uvc = test_facility(158);

	report_prefix_push("uvc");
	if (!has_uvc) {
		report_skip("Ultravisor call facility is not available");
		goto done;
	}
	if (!uv_os_is_host()) {
		report_skip("This test needs to be run in a UV host environment");
		goto done;
	}

	test_i3();
	test_priv();
	test_invalid();
	test_uv_uninitialized();
	test_query();

	if (get_ram_size() < SNIPPET_PV_MIN_MEM_SIZE) {
		report_skip("Not enough memory. This test needs about %ld MB of memory",
			    SNIPPET_PV_MIN_MEM_SIZE / SZ_1M);
		goto done;
	}

	test_init();

	setup_vmem();
	test_access();

	test_config_create();
	test_cpu_create();
	test_set_se_header();
	test_cpu_destroy();
	test_config_destroy();
	test_clear();

done:
	return report_summary();
}