/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Library for managing various aspects of guests
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */

#include <asm/barrier.h>
#include <bitops.h>
#include <libcflat.h>
#include <sie.h>
#include <asm/page.h>
#include <asm/interrupt.h>
#include <alloc_page.h>
#include <vmalloc.h>
#include <sclp.h>

void sie_expect_validity(struct vm *vm)
{
	vm->validity_expected = true;
}

uint16_t sie_get_validity(struct vm *vm)
{
	/*
	 * 0xffff will never be returned by SIE, so we can indicate a
	 * missing validity via this value.
	 */
	if (vm->sblk->icptcode != ICPT_VALIDITY)
		return 0xffff;

	return vm->sblk->ipb >> 16;
}

void sie_check_validity(struct vm *vm, uint16_t vir_exp)
{
	uint16_t vir = sie_get_validity(vm);

	report(vir_exp == vir, "VALIDITY: %x", vir);
}

void sie_handle_validity(struct vm *vm)
{
	if (vm->sblk->icptcode != ICPT_VALIDITY)
		return;

	if (!vm->validity_expected)
		report_abort("VALIDITY: %x", sie_get_validity(vm));
	vm->validity_expected = false;
}
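
/*
 * Illustrative sketch, not part of the library: how a test typically uses
 * the validity helpers above. The VIR value 0x0037 is a made-up example;
 * a real test checks whatever code it actually expects.
 *
 *	sie_expect_validity(&vm);
 *	sie(&vm);
 *	sie_check_validity(&vm, 0x0037);
 *
 * Without a prior sie_expect_validity() call, sie_handle_validity() aborts
 * the test on any validity intercept.
 */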

void sie(struct vm *vm)
{
	uint64_t old_cr13;

	/* When a pgm int code is set, we'll never enter SIE below. */
	assert(!read_pgm_int_code());

	if (sie_is_pv(vm))
		memcpy(vm->sblk->pv_grregs, vm->save_area.guest.grs,
		       sizeof(vm->save_area.guest.grs));

	/* Reset icptcode so we don't trip over it below */
	vm->sblk->icptcode = 0;

	/*
	 * Set up home address space to match primary space. Instead of running
	 * in home space all the time, we switch every time in sie() because:
	 * - tests that depend on running in primary space mode don't need to be
	 *   touched
	 * - it avoids regressions in tests
	 * - switching every time makes it easier to extend this in the future,
	 *   for example to allow tests to run in whatever space they want
	 */
	old_cr13 = stctg(13);
	lctlg(13, stctg(1));

	/* switch to home space so guest tables can be different from host */
	psw_mask_set_bits(PSW_MASK_HOME);

	/* also handle all interruptions in home space while in SIE */
	irq_set_dat_mode(true, AS_HOME);

	/* leave SIE when we have an intercept or an interrupt so the test can react to it */
	while (vm->sblk->icptcode == 0 && !read_pgm_int_code()) {
		sie64a(vm->sblk, &vm->save_area);
		sie_handle_validity(vm);
	}
	vm->save_area.guest.grs[14] = vm->sblk->gg14;
	vm->save_area.guest.grs[15] = vm->sblk->gg15;

	irq_set_dat_mode(true, AS_PRIM);
	psw_mask_clear_bits(PSW_MASK_HOME);

	/* restore the old CR 13 */
	lctlg(13, old_cr13);

	if (sie_is_pv(vm))
		memcpy(vm->save_area.guest.grs, vm->sblk->pv_grregs,
		       sizeof(vm->save_area.guest.grs));
}
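
/*
 * Illustrative sketch, not part of the library: guest registers are passed
 * in and out of SIE through the save area, so a test can seed them before
 * entry and inspect them after an intercept. The register choice and value
 * below are arbitrary.
 *
 *	vm.save_area.guest.grs[2] = 0x2a;	// seen by the guest as r2
 *	sie(&vm);				// returns on the first intercept
 *	report_info("icptcode 0x%x, guest r2 0x%lx",
 *		    vm.sblk->icptcode, vm.save_area.guest.grs[2]);
 */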

void sie_guest_sca_create(struct vm *vm)
{
	vm->sca = (struct esca_block *)alloc_page();

	/* Let's start out with one page of ESCA for now */
	vm->sblk->scaoh = ((uint64_t)vm->sca >> 32);
	vm->sblk->scaol = (uint64_t)vm->sca & ~0x3fU;
	vm->sblk->ecb2 |= ECB2_ESCA;

	/* Enable SIGP sense running status interpretation */
	vm->sblk->ecb |= ECB_SRSI;

	/* We assume that cpu 0 is always part of the vm */
	vm->sca->mcn[0] = BIT(63);
	vm->sca->cpu[0].sda = (uint64_t)vm->sblk;
}
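
/*
 * Illustrative note on the origin encoding above, with a hypothetical SCA
 * address of 0x123456000: scaoh receives the upper word 0x1, and scaol
 * receives 0x23456000, i.e. the lower word with the low six bits masked
 * off (the page-aligned allocation keeps them zero anyway).
 */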

/* Initializes the struct vm members like the SIE control block. */
void sie_guest_create(struct vm *vm, uint64_t guest_mem, uint64_t guest_mem_len)
{
	vm->sblk = alloc_page();
	memset(vm->sblk, 0, PAGE_SIZE);
	vm->sblk->cpuflags = CPUSTAT_ZARCH | CPUSTAT_RUNNING;
	vm->sblk->ihcpu = 0xffff;
	vm->sblk->prefix = 0;

	/* Guest memory chunks are always 1MB */
	assert(!(guest_mem_len & ~HPAGE_MASK));
	vm->guest_mem = (uint8_t *)guest_mem;
	/* For non-PV guests we re-use the host's ASCE for ease of use */
	vm->save_area.guest.asce = stctg(1);
	/* Currently MSO/MSL is the easiest option */
	vm->sblk->mso = (uint64_t)guest_mem;
	vm->sblk->msl = (uint64_t)guest_mem + ((guest_mem_len - 1) & HPAGE_MASK);

	/* CRYCB needs to be in the first 2GB */
	vm->crycb = alloc_pages_flags(0, AREA_DMA31);
	vm->sblk->crycbd = (uint32_t)(uintptr_t)vm->crycb;
}
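
/*
 * Illustrative example of the MSO/MSL setup above (addresses are
 * hypothetical): for guest_mem at 0x80000000 and guest_mem_len of 16 MB,
 * mso becomes 0x80000000 and msl becomes 0x80f00000, i.e. the start of the
 * last 1 MB block of the guest memory.
 */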

/**
 * sie_guest_alloc() - Allocate memory for a guest and map it in virtual address
 * space such that it is properly aligned.
 * @guest_size: the desired size of the guest in bytes.
 */
uint8_t *sie_guest_alloc(uint64_t guest_size)
{
	static unsigned long guest_counter = 1;
	u8 *guest_phys, *guest_virt;
	unsigned long i;
	pgd_t *root;

	setup_vm();
	root = (pgd_t *)(stctg(1) & PAGE_MASK);

	/*
	 * Start of guest memory in host virtual space needs to be aligned to
	 * 2GB for some environments. It also can't be at 2GB since the memory
	 * allocator stores its page_states metadata there.
	 * Thus we use the next multiple of 4GB after the end of physical
	 * mapping. This also leaves space after end of physical memory so the
	 * page immediately after physical memory is guaranteed not to be
	 * present.
	 */
	guest_virt = (uint8_t *)ALIGN(get_ram_size() + guest_counter * 4UL * SZ_1G, SZ_2G);
	guest_counter++;

	guest_phys = alloc_pages(get_order(guest_size) - 12);
	/*
	 * Establish a new mapping of the guest memory so it can be 2GB aligned
	 * without actually requiring 2GB physical memory.
	 */
	for (i = 0; i < guest_size; i += PAGE_SIZE) {
		install_page(root, __pa(guest_phys + i), guest_virt + i);
	}
	memset(guest_virt, 0, guest_size);

	return guest_virt;
}
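
/*
 * Illustrative sketch, not part of the library: putting the allocation and
 * creation helpers together for a 1 MB guest. Loading actual guest code
 * into the memory and setting up the guest PSW are left out here.
 *
 *	struct vm vm = {};
 *	uint8_t *guest_mem = sie_guest_alloc(HPAGE_SIZE);
 *
 *	sie_guest_create(&vm, (uint64_t)guest_mem, HPAGE_SIZE);
 *	// ... copy guest code into guest_mem, set up the guest PSW ...
 *	sie(&vm);
 *	sie_guest_destroy(&vm);
 */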

/* Frees the memory that was allocated when the guest was set up */
void sie_guest_destroy(struct vm *vm)
{
	/* The control block must be freed last, it is still read below */
	if (vm->sblk->ecb2 & ECB2_ESCA)
		free_page(vm->sca);
	free_page(vm->crycb);
	free_page(vm->sblk);
}