xref: /kvm-unit-tests/s390x/uv-host.c (revision b36f35a82ff4cec5f71a68aa782332e2bc3488f7)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Guest Ultravisor Call tests
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */

#include <libcflat.h>
#include <hardware.h>
#include <alloc.h>
#include <vmalloc.h>
#include <sclp.h>
#include <smp.h>
#include <uv.h>
#include <mmu.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/interrupt.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include <asm-generic/barrier.h>

static struct uv_cb_qui uvcb_qui;
static struct uv_cb_init uvcb_init;
static struct uv_cb_cgc uvcb_cgc;
static struct uv_cb_csc uvcb_csc;

extern int diag308_load_reset(u64 code);

struct cmd_list {
	const char *name;
	uint16_t cmd;
	uint16_t len;
	int call_bit;
};

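/* Busy loop to keep a secondary CPU running */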
static void cpu_loop(void)
{
	for (;;) {}
}

/*
 * Checks if a memory area is protected as secure memory.
 * Returns true if all pages are protected, false otherwise.
 */
static bool access_check_3d(uint8_t *access_ptr, uint64_t len)
{
	assert(!(len & ~PAGE_MASK));
	assert(!((uint64_t)access_ptr & ~PAGE_MASK));

	while (len) {
		expect_pgm_int();
		READ_ONCE(*access_ptr);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;
		expect_pgm_int();
		WRITE_ONCE(*access_ptr, 42);
		if (clear_pgm_int() != PGM_INT_CODE_SECURE_STOR_ACCESS)
			return false;

		access_ptr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	return true;
}

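/* The UV commands exercised by the tests below */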
static struct cmd_list cmds[] = {
	{ "init", UVC_CMD_INIT_UV, sizeof(struct uv_cb_init), BIT_UVC_CMD_INIT_UV },
	{ "create conf", UVC_CMD_CREATE_SEC_CONF, sizeof(struct uv_cb_cgc), BIT_UVC_CMD_CREATE_SEC_CONF },
	{ "destroy conf", UVC_CMD_DESTROY_SEC_CONF, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CONF },
	{ "create cpu", UVC_CMD_CREATE_SEC_CPU, sizeof(struct uv_cb_csc), BIT_UVC_CMD_CREATE_SEC_CPU },
	{ "destroy cpu", UVC_CMD_DESTROY_SEC_CPU, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_DESTROY_SEC_CPU },
	{ "conv to", UVC_CMD_CONV_TO_SEC_STOR, sizeof(struct uv_cb_cts), BIT_UVC_CMD_CONV_TO_SEC_STOR },
	{ "conv from", UVC_CMD_CONV_FROM_SEC_STOR, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_CONV_FROM_SEC_STOR },
	{ "set sec conf", UVC_CMD_SET_SEC_CONF_PARAMS, sizeof(struct uv_cb_ssc), BIT_UVC_CMD_SET_SEC_PARMS },
	{ "unpack", UVC_CMD_UNPACK_IMG, sizeof(struct uv_cb_unp), BIT_UVC_CMD_UNPACK_IMG },
	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_VERIFY_IMG },
	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET },
	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET_INITIAL },
	{ "conf clear reset", UVC_CMD_PREPARE_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_RESET },
	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET },
	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state), BIT_UVC_CMD_CPU_SET_STATE },
	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_PIN_PAGE_SHARED },
	{ "unpin shared", UVC_CMD_UNPIN_PAGE_SHARED, sizeof(struct uv_cb_cts), BIT_UVC_CMD_UNPIN_PAGE_SHARED },
	{ NULL, 0, 0 },
};

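/*
 * Issue a UV call with non-zero i3/m4 instruction fields; this is
 * expected to result in a specification exception.
 */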
static void test_i3(void)
{
	struct uv_cb_header uvcb = {
		.cmd = UVC_CMD_INIT_UV,
		.len = sizeof(struct uv_cb_init),
	};
	unsigned long r1 = 0;
	int cc;

	report_prefix_push("i3");
	expect_pgm_int();
	asm volatile(
		"0:	.insn rrf,0xB9A40000,%[r1],%[r2],4,2\n"
		"		ipm	%[cc]\n"
		"		srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (&uvcb)
		: "memory", "cc");
	check_pgm_int_code(PGM_INT_CODE_SPECIFICATION);
	report_prefix_pop();
}

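/*
 * UV calls are privileged; issuing any of them in problem state must
 * result in a privileged-operation exception.
 */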
static void test_priv(void)
{
	struct uv_cb_header uvcb = {};
	uint16_t pgm;
	int i;

	report_prefix_push("privileged");
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		enter_pstate();
		uv_call_once(0, (uint64_t)&uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PRIVILEGED_OPERATION, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

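/*
 * Before the UV has been initialized, every command except the init
 * command must be rejected with an invalid-state return code.
 */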
static void test_uv_uninitialized(void)
{
	struct uv_cb_header uvcb = {};
	int i;

	report_prefix_push("uninitialized");

	for (i = 0; cmds[i].name; i++) {
		if (cmds[i].cmd == UVC_CMD_INIT_UV)
			continue;
		expect_pgm_int();
		uvcb.cmd = cmds[i].cmd;
		uvcb.len = cmds[i].len;
		uv_call_once(0, (uint64_t)&uvcb);
		report(uvcb.rc == UVC_RC_INV_STATE, "%s", cmds[i].name);
	}
	report_prefix_pop();
}

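/*
 * Check that UVCBs which are inaccessible, either entirely or
 * crossing into an invalid page, result in page translation
 * exceptions.
 */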
static void test_access(void)
{
	struct uv_cb_header *uvcb;
	void *pages = alloc_pages(1);
	uint16_t pgm;
	int i;

	/* Put the UVCB on the second page, which we will protect later */
	uvcb = pages + PAGE_SIZE;

	report_prefix_push("access");

	report_prefix_push("non-crossing");
	protect_page(uvcb, PAGE_ENTRY_I);
	for (i = 0; cmds[i].name; i++) {
		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	report_prefix_push("crossing");
	/*
	 * Put the header into the readable first page; everything after
	 * the header will be on the second, invalid page.
	 */
	uvcb -= 1;
	for (i = 0; cmds[i].name; i++) {
		uvcb->cmd = cmds[i].cmd;
		uvcb->len = cmds[i].len;

		expect_pgm_int();
		mb();
		uv_call_once(0, (uint64_t)uvcb);
		pgm = clear_pgm_int();
		report(pgm == PGM_INT_CODE_PAGE_TRANSLATION, "%s", cmds[i].name);
	}
	report_prefix_pop();

	uvcb += 1;
	unprotect_page(uvcb, PAGE_ENTRY_I);

	free_pages(pages);
	report_prefix_pop();
}

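/*
 * Destroy the secure configuration, checking the length and handle
 * validation of the destroy call first.
 */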
static void test_config_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF,
		.header.len = sizeof(uvcb),
		.handle = uvcb_cgc.guest_handle,
	};

	report_prefix_push("dsc");
	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.handle -= 1;

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");
	report_prefix_pop();
}

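/*
 * Destroy the secure CPU, checking the length and handle validation
 * of the destroy call first.
 */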
static void test_cpu_destroy(void)
{
	int rc;
	struct uv_cb_nodata uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_DESTROY_SEC_CPU,
		.handle = uvcb_csc.cpu_handle,
	};

	report_prefix_push("dcpu");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	if (!machine_is_z15()) {
		uvcb.handle += 1;
		rc = uv_call(0, (uint64_t)&uvcb);
		report(rc == 1 && uvcb.header.rc == UVC_RC_INV_CHANDLE, "invalid handle");
		uvcb.handle -= 1;
	}

	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");

	report_prefix_pop();
}

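/*
 * Exercise the set-secure-configuration-parameters call with an
 * invalid length, an invalid handle, and inaccessible SE header
 * addresses.
 */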
static void test_set_se_header(void)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.guest_handle = uvcb_cgc.guest_handle,
		.sec_header_origin = 0,
		.sec_header_len = 0x1000,
	};
	void *pages = alloc_pages(1);
	void *inv;
	int rc;

	report_prefix_push("sscp");

	uvcb.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb.header.len += 8;

	uvcb.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
	uvcb.guest_handle -= 1;

	inv = pages + PAGE_SIZE;
	uvcb.sec_header_origin = (uint64_t)inv;
	protect_page(inv, PAGE_ENTRY_I);
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception");

	/*
	 * Shift the ptr so the first few DWORDs are accessible but
	 * the following are on an invalid page.
	 */
	uvcb.sec_header_origin -= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb);
	report(rc == 1 && uvcb.header.rc == 0x103,
	       "se hdr access exception crossing");
	unprotect_page(inv, PAGE_ENTRY_I);

	free_pages(pages);
	report_prefix_pop();
}

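/*
 * Create a secure CPU in the configuration created earlier, probing
 * the error conditions for invalid lengths, handles, CPU numbers,
 * and donated storage before the successful call.
 */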
static void test_cpu_create(void)
{
	int rc;
	unsigned long tmp;

	report_prefix_push("csc");
	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	uvcb_csc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_csc.cpu_handle, "hdr invalid length");
	uvcb_csc.header.len += 8;

	uvcb_csc.guest_handle += 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == UVC_RC_INV_GHANDLE && rc == 1,
	       "invalid guest handle");
	uvcb_csc.guest_handle -= 1;

	uvcb_csc.num = uvcb_qui.max_guest_cpus + 1;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x103 && rc == 1,
	       "invalid cpu #");
	uvcb_csc.num = 0;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x105 && rc == 1,
	       "cpu stor inaccessible");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = 0;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x106 && rc == 1,
	       "cpu stor in lowcore");
	uvcb_csc.stor_origin = tmp;

	tmp = uvcb_csc.state_origin;
	uvcb_csc.state_origin = get_max_ram_size() + PAGE_SIZE;
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(uvcb_csc.header.rc == 0x107 && rc == 1,
	       "SIE SD inaccessible");
	uvcb_csc.state_origin = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 0 && uvcb_csc.header.rc == UVC_RC_EXECUTED &&
	       uvcb_csc.cpu_handle, "success");

	rc = access_check_3d((uint8_t *)uvcb_csc.stor_origin,
			     uvcb_qui.cpu_stor_len);
	report(rc, "Storage protection");

	tmp = uvcb_csc.stor_origin;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	rc = uv_call(0, (uint64_t)&uvcb_csc);
	report(rc == 1 && uvcb_csc.header.rc == 0x104, "already defined");
	uvcb_csc.stor_origin = tmp;
	report_prefix_pop();
}

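/*
 * Create a secure configuration, probing the error conditions for
 * invalid lengths, guest storage limits, ASCE bits, donated storage,
 * and the SCA before the successful call.
 */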
static void test_config_create(void)
{
	int rc;
	unsigned long vsize, tmp;
	static struct uv_cb_cgc uvcb;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);
	report_prefix_push("cgc");

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	uvcb_cgc.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
	       !uvcb_cgc.guest_handle, "hdr invalid length");
	uvcb_cgc.header.len += 8;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr + (1UL << 20) * 2 + 1;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x101 && rc == 1,
	       "MSO > max guest addr");
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr - (1UL << 20);
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x102 && rc == 1,
	       "MSO + MSL > max guest addr");
	uvcb_cgc.guest_stor_origin = 0;

	uvcb_cgc.guest_asce &= ~ASCE_P;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x105 && rc == 1,
	       "ASCE private bit missing");
	uvcb_cgc.guest_asce |= ASCE_P;

	uvcb_cgc.guest_asce |= 0x20;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x105 && rc == 1,
	       "ASCE bit 58 set");
	uvcb_cgc.guest_asce &= ~0x20;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = get_max_ram_size() + 8;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x108 && rc == 1,
	       "base storage origin > available memory");
	uvcb_cgc.conf_base_stor_origin = tmp;

	tmp = uvcb_cgc.conf_base_stor_origin;
	uvcb_cgc.conf_base_stor_origin = 0x1000;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x109 && rc == 1,
	       "base storage origin contains lowcore");
	uvcb_cgc.conf_base_stor_origin = tmp;

	if (smp_query_num_cpus() == 1) {
		sigp_retry(1, SIGP_SET_PREFIX,
			   uvcb_cgc.conf_var_stor_origin + PAGE_SIZE, NULL);
		rc = uv_call(0, (uint64_t)&uvcb_cgc);
		report(uvcb_cgc.header.rc == 0x10e && rc == 1 &&
		       !uvcb_cgc.guest_handle, "variable storage area contains lowcore");
		sigp_retry(1, SIGP_SET_PREFIX, 0x0, NULL);
	}

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x10c && rc == 1,
	       "sca == 0");
	uvcb_cgc.guest_sca = tmp;

	tmp = uvcb_cgc.guest_sca;
	uvcb_cgc.guest_sca = get_max_ram_size() + PAGE_SIZE * 4;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc == 0x10d && rc == 1,
	       "sca inaccessible");
	uvcb_cgc.guest_sca = tmp;

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(rc == 0 && uvcb_cgc.header.rc == UVC_RC_EXECUTED, "successful");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_base_stor_origin,
			     uvcb_qui.conf_base_phys_stor_len);
	report(rc, "Base storage protection");

	rc = access_check_3d((uint8_t *)uvcb_cgc.conf_var_stor_origin, vsize);
	report(rc, "Variable storage protection");

	uvcb_cgc.header.rc = 0;
	uvcb_cgc.header.rrc = 0;
	tmp = uvcb_cgc.guest_handle;
	uvcb_cgc.guest_handle = 0;
	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	report(uvcb_cgc.header.rc >= 0x100 && rc == 1, "reuse uvcb");
	uvcb_cgc.guest_handle = tmp;

	/* Copy over most data from uvcb_cgc, so we have the ASCE that was used. */
	memcpy(&uvcb, &uvcb_cgc, sizeof(uvcb));

	/* Reset the header and handle */
	uvcb.header.rc = 0;
	uvcb.header.rrc = 0;
	uvcb.guest_handle = 0;

	/* Use new storage areas. */
	uvcb.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);

	rc = uv_call(0, (uint64_t)&uvcb);
	report(uvcb.header.rc >= 0x104 && rc == 1 && !uvcb.guest_handle,
	       "reuse ASCE");
	free((void *)uvcb.conf_base_stor_origin);
	free((void *)uvcb.conf_var_stor_origin);

	/* Missing: 106, 10a, 10b */
	report_prefix_pop();
}

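/*
 * Initialize the UV with donated storage, probing invalid lengths,
 * misaligned or out-of-range storage, storage below 2GB, and the
 * running-CPU restriction before the successful init.
 */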
static void test_init(void)
{
	int rc;
	uint64_t mem;

	/* Donated storage needs to reside above 2GB */
	mem = (uint64_t)memalign_pages_flags(SZ_1M, uvcb_qui.uv_base_stor_len, AREA_NORMAL);

	uvcb_init.header.len = sizeof(uvcb_init);
	uvcb_init.header.cmd = UVC_CMD_INIT_UV;
	uvcb_init.stor_origin = mem;
	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;

	report_prefix_push("init");
	uvcb_init.header.len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == UVC_RC_INV_LEN,
	       "hdr invalid length");
	uvcb_init.header.len += 8;

	uvcb_init.stor_len -= 8;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x103,
	       "storage invalid length");
	uvcb_init.stor_len += 8;

	/* Storage origin is 1MB aligned, the length is 4KB aligned */
	uvcb_init.stor_origin = get_max_ram_size();
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && (uvcb_init.header.rc == 0x104 || uvcb_init.header.rc == 0x105),
	       "storage origin invalid");
	uvcb_init.stor_origin = mem;

	if (uvcb_init.stor_len >= HPAGE_SIZE) {
		uvcb_init.stor_origin = get_max_ram_size() - HPAGE_SIZE;
		rc = uv_call(0, (uint64_t)&uvcb_init);
		report(rc == 1 && uvcb_init.header.rc == 0x105,
		       "storage + length invalid");
		uvcb_init.stor_origin = mem;
	} else {
		report_skip("storage + length invalid, stor_len < HPAGE_SIZE");
	}

	uvcb_init.stor_origin = 1UL << 30;
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x108,
	       "storage below 2GB");
	uvcb_init.stor_origin = mem;

	smp_cpu_setup(1, PSW_WITH_CUR_MASK(cpu_loop));
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x102,
	       "too many running cpus");
	smp_cpu_stop(1);

	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 0 && uvcb_init.header.rc == UVC_RC_EXECUTED, "successful");

	mem = (uint64_t)memalign(1UL << 31, uvcb_qui.uv_base_stor_len);
	rc = uv_call(0, (uint64_t)&uvcb_init);
	report(rc == 1 && uvcb_init.header.rc == 0x101, "double init");
	free((void *)mem);

	report_prefix_pop();
}

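/*
 * Check the length handling of the query call and verify that all
 * tested commands are advertised as available.
 */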
static void test_query(void)
{
	int i = 0, cc;

	uvcb_qui.header.cmd = UVC_CMD_QUI;
	uvcb_qui.header.len = sizeof(uvcb_qui);

	report_prefix_push("query");
	uvcb_qui.header.len = 0xa0;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == UVC_RC_INV_LEN, "length");

	uvcb_qui.header.len = 0xa8;
	uv_call(0, (uint64_t)&uvcb_qui);
	report(uvcb_qui.header.rc == 0x100, "insf length");

	uvcb_qui.header.len = sizeof(uvcb_qui);
	cc = uv_call(0, (uint64_t)&uvcb_qui);
	report((!cc && uvcb_qui.header.rc == UVC_RC_EXECUTED) ||
	       (cc == 1 && uvcb_qui.header.rc == 0x100),
		"successful query");

	for (i = 0; cmds[i].name; i++)
		report(uv_query_test_call(cmds[i].call_bit), "%s", cmds[i].name);

	report_prefix_pop();
}

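/* Commands that the UV must reject as invalid for a UV host */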
static struct cmd_list invalid_cmds[] = {
	{ "bogus", 0x4242, sizeof(struct uv_cb_header), -1},
	{ "share", UVC_CMD_SET_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_SET_SHARED_ACCESS },
	{ "unshare", UVC_CMD_REMOVE_SHARED_ACCESS, sizeof(struct uv_cb_share), BIT_UVC_CMD_REMOVE_SHARED_ACCESS },
	{ "attest", UVC_CMD_ATTESTATION, sizeof(struct uv_cb_attest), BIT_UVC_CMD_ATTESTATION },
	{ NULL, 0, 0 },
};

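/*
 * All commands in invalid_cmds must be rejected with an
 * invalid-command return code and must not be advertised by the
 * query call.
 */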
static void test_invalid(void)
{
	struct uv_cb_header hdr = {};
	int i, cc;

	report_prefix_push("invalid");
	for (i = 0; invalid_cmds[i].name; i++) {
		hdr.cmd = invalid_cmds[i].cmd;
		hdr.len = invalid_cmds[i].len;
		cc = uv_call(0, (uint64_t)&hdr);
		report(cc == 1 && hdr.rc == UVC_RC_INV_CMD &&
		       (invalid_cmds[i].call_bit == -1 || !uv_query_test_call(invalid_cmds[i].call_bit)),
		       "%s", invalid_cmds[i].name);
	}
	report_prefix_pop();
}

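/*
 * Create a secure configuration and a secure CPU so that test_clear()
 * can verify that a reset clears and unprotects the donated memory.
 */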
static void setup_test_clear(void)
{
	unsigned long vsize;
	int rc;

	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
	uvcb_cgc.header.len = sizeof(uvcb_cgc);

	uvcb_cgc.guest_stor_origin = 0;
	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);

	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);

	rc = uv_call(0, (uint64_t)&uvcb_cgc);
	assert(rc == 0);

	uvcb_csc.header.len = sizeof(uvcb_csc);
	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);

	rc = uv_call(0, (uint64_t)&uvcb_csc);
	assert(rc == 0);
}

static void test_clear(void)
{
	uint64_t *tmp;

	report_prefix_push("load normal reset");

	/*
	 * Set up a config and a cpu so we can check whether a diag308
	 * reset clears the donated memory and makes the pages non-secure.
	 */
	setup_test_clear();

	diag308_load_reset(1);
	sclp_console_setup();

	tmp = (void *)uvcb_init.stor_origin;
	report(!*tmp, "uv init donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_base_stor_origin;
	report(!*tmp, "config base donated memory cleared");

	tmp = (void *)uvcb_cgc.conf_var_stor_origin;
	report(!*tmp, "config variable donated memory cleared");

	tmp = (void *)uvcb_csc.stor_origin;
	report(!*tmp, "cpu donated memory cleared after reset 1");

	/* Check that the UV is uninitialized again after the reset */
	test_uv_uninitialized();

	report_prefix_pop();
}

static void setup_vmem(void)
{
	uint64_t asce;

	setup_mmu(get_max_ram_size(), NULL);
	/*
	 * setup_mmu() will enable DAT and set the primary address
	 * space, but we also need a valid home space, since UV calls
	 * take home-space virtual addresses.
	 *
	 * Hence we just copy the primary ASCE into the home space
	 * control register.
	 */
	asce = stctg(1);
	lctlg(13, asce);
}

int main(void)
{
	bool has_uvc = test_facility(158);

	report_prefix_push("uvc");
	if (!has_uvc) {
		report_skip("Ultravisor call facility is not available");
		goto done;
	}
	if (!uv_os_is_host()) {
		report_skip("This test needs to be run in a UV host environment");
		goto done;
	}

	test_i3();
	test_priv();
	test_invalid();
	test_uv_uninitialized();
	test_query();
	test_init();

	setup_vmem();
	test_access();

	test_config_create();
	test_cpu_create();
	test_set_se_header();
	test_cpu_destroy();
	test_config_destroy();
	test_clear();

done:
	return report_summary();
}
733