xref: /kvm-unit-tests/lib/s390x/sie.c (revision f07210607d4da37b907bffe6b1286f6ff3e07487)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Library for managing various aspects of guests
4  *
5  * Copyright (c) 2021 IBM Corp
6  *
7  * Authors:
8  *  Janosch Frank <frankja@linux.ibm.com>
9  */
10 
11 #include <asm/barrier.h>
12 #include <bitops.h>
13 #include <libcflat.h>
14 #include <sie.h>
15 #include <asm/page.h>
16 #include <libcflat.h>
17 #include <alloc_page.h>
18 #include <vmalloc.h>
19 #include <sclp.h>
20 
21 void sie_expect_validity(struct vm *vm)
22 {
23 	vm->validity_expected = true;
24 }
25 
26 uint16_t sie_get_validity(struct vm *vm)
27 {
28 	/*
29 	 * 0xffff will never be returned by SIE, so we can indicate a
30 	 * missing validity via this value.
31 	 */
32 	if (vm->sblk->icptcode != ICPT_VALIDITY)
33 		return 0xffff;
34 
35 	return vm->sblk->ipb >> 16;
36 }
37 
38 void sie_check_validity(struct vm *vm, uint16_t vir_exp)
39 {
40 	uint16_t vir = sie_get_validity(vm);
41 
42 	report(vir_exp == vir, "VALIDITY: %x", vir);
43 }
44 
45 void sie_handle_validity(struct vm *vm)
46 {
47 	if (vm->sblk->icptcode != ICPT_VALIDITY)
48 		return;
49 
50 	if (!vm->validity_expected)
51 		report_abort("VALIDITY: %x", sie_get_validity(vm));
52 	vm->validity_expected = false;
53 }
54 
/*
 * Enter the guest and keep re-entering until an interception other than
 * an (expected) validity intercept occurs. On return, the caller
 * inspects vm->sblk->icptcode to learn why the guest stopped.
 */
void sie(struct vm *vm)
{
	/*
	 * sdf == 2 appears to mark a protected-virtualization guest
	 * (NOTE(review): inferred from the pv_grregs field — confirm):
	 * its GPRs are exchanged through the pv_grregs area of the
	 * control block rather than the ordinary save area.
	 */
	if (vm->sblk->sdf == 2)
		memcpy(vm->sblk->pv_grregs, vm->save_area.guest.grs,
		       sizeof(vm->save_area.guest.grs));

	/* Reset icptcode so we don't trip over it below */
	vm->sblk->icptcode = 0;

	/* Re-enter as long as SIE exits only with handled validity intercepts. */
	while (vm->sblk->icptcode == 0) {
		sie64a(vm->sblk, &vm->save_area);
		sie_handle_validity(vm);
	}
	/* r14/r15 are kept in the control block itself; mirror them back. */
	vm->save_area.guest.grs[14] = vm->sblk->gg14;
	vm->save_area.guest.grs[15] = vm->sblk->gg15;

	/* For PV guests, copy the guest GPRs back out of pv_grregs. */
	if (vm->sblk->sdf == 2)
		memcpy(vm->save_area.guest.grs, vm->sblk->pv_grregs,
		       sizeof(vm->save_area.guest.grs));
}
75 
/*
 * Allocate and wire up an extended system control area (ESCA) for @vm
 * and enable SIGP sense-running interpretation. Only CPU 0 is entered
 * into the SCA.
 */
void sie_guest_sca_create(struct vm *vm)
{
	vm->sca = (struct esca_block *)alloc_page();

	/* Let's start out with one page of ESCA for now */
	vm->sblk->scaoh = ((uint64_t)vm->sca >> 32);
	/* Low word of the origin; bits 0-5 are reserved/control bits. */
	vm->sblk->scaol = (uint64_t)vm->sca & ~0x3fU;
	vm->sblk->ecb2 |= ECB2_ESCA;

	/* Enable SIGP sense running interpretation */
	vm->sblk->ecb |= ECB_SRSI;

	/* We assume that cpu 0 is always part of the vm */
	vm->sca->mcn[0] = BIT(63);
	vm->sca->cpu[0].sda = (uint64_t)vm->sblk;
}
92 
/*
 * Initializes the struct vm members like the SIE control block.
 *
 * @guest_mem: host-absolute start of the guest's memory
 * @guest_mem_len: size of the guest's memory; must be a multiple of 1 MB
 */
void sie_guest_create(struct vm *vm, uint64_t guest_mem, uint64_t guest_mem_len)
{
	vm->sblk = alloc_page();
	memset(vm->sblk, 0, PAGE_SIZE);
	vm->sblk->cpuflags = CPUSTAT_ZARCH | CPUSTAT_RUNNING;
	vm->sblk->ihcpu = 0xffff;
	vm->sblk->prefix = 0;

	/* Guest memory chunks are always 1MB */
	assert(!(guest_mem_len & ~HPAGE_MASK));
	vm->guest_mem = (uint8_t *)guest_mem;
	/* For non-PV guests we re-use the host's ASCE for ease of use */
	vm->save_area.guest.asce = stctg(1);
	/* Currently MSO/MSL is the easiest option */
	vm->sblk->mso = (uint64_t)guest_mem;
	/* MSL is the address of the last 1 MB block inside the guest range. */
	vm->sblk->msl = (uint64_t)guest_mem + ((guest_mem_len - 1) & HPAGE_MASK);

	/* CRYCB needs to be in the first 2GB */
	vm->crycb = alloc_pages_flags(0, AREA_DMA31);
	vm->sblk->crycbd = (uint32_t)(uintptr_t)vm->crycb;
}
115 
116 /**
117  * sie_guest_alloc() - Allocate memory for a guest and map it in virtual address
118  * space such that it is properly aligned.
119  * @guest_size: the desired size of the guest in bytes.
120  */
121 uint8_t *sie_guest_alloc(uint64_t guest_size)
122 {
123 	static unsigned long guest_counter = 1;
124 	u8 *guest_phys, *guest_virt;
125 	unsigned long i;
126 	pgd_t *root;
127 
128 	setup_vm();
129 	root = (pgd_t *)(stctg(1) & PAGE_MASK);
130 
131 	/*
132 	 * Start of guest memory in host virtual space needs to be aligned to
133 	 * 2GB for some environments. It also can't be at 2GB since the memory
134 	 * allocator stores its page_states metadata there.
135 	 * Thus we use the next multiple of 4GB after the end of physical
136 	 * mapping. This also leaves space after end of physical memory so the
137 	 * page immediately after physical memory is guaranteed not to be
138 	 * present.
139 	 */
140 	guest_virt = (uint8_t *)ALIGN(get_ram_size() + guest_counter * 4UL * SZ_1G, SZ_2G);
141 	guest_counter++;
142 
143 	guest_phys = alloc_pages(get_order(guest_size) - 12);
144 	/*
145 	 * Establish a new mapping of the guest memory so it can be 2GB aligned
146 	 * without actually requiring 2GB physical memory.
147 	 */
148 	for (i = 0; i < guest_size; i += PAGE_SIZE) {
149 		install_page(root, __pa(guest_phys + i), guest_virt + i);
150 	}
151 	memset(guest_virt, 0, guest_size);
152 
153 	return guest_virt;
154 }
155 
156 /* Frees the memory that was gathered on initialization */
157 void sie_guest_destroy(struct vm *vm)
158 {
159 	free_page(vm->crycb);
160 	free_page(vm->sblk);
161 	if (vm->sblk->ecb2 & ECB2_ESCA)
162 		free_page(vm->sca);
163 }
164