/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Ultravisor related functionality
 *
 * Copyright 2020 IBM Corp.
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <alloc.h>
#include <alloc_page.h>
#include <asm/page.h>
#include <asm/arch_def.h>

#include <asm/facility.h>
#include <asm/uv.h>
#include <uv.h>
#include <sie.h>
#include <snippet.h>

static struct uv_cb_qui uvcb_qui = {
	.header.cmd = UVC_CMD_QUI,
	.header.len = sizeof(uvcb_qui),
};
static uint64_t uv_init_mem;

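/*
 * Check whether we are running as a protected virtualization guest:
 * the UV-call facility (158) and the share/unshare UV calls must be
 * available.
 */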
bool uv_os_is_guest(void)
{
	return test_facility(158) &&
	       uv_query_test_call(BIT_UVC_CMD_SET_SHARED_ACCESS) &&
	       uv_query_test_call(BIT_UVC_CMD_REMOVE_SHARED_ACCESS);
}

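/*
 * Check whether we are running as a protected virtualization host:
 * the UV-call facility (158) and the init UV call must be available.
 */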
bool uv_os_is_host(void)
{
	return test_facility(158) && uv_query_test_call(BIT_UVC_CMD_INIT_UV);
}

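/*
 * Check the common requirements for PV host tests: the UV-call
 * facility, the SIEF2 SCLP facility, and enough memory for the PV
 * snippets. Reports a skip and returns false if one is not met.
 */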
bool uv_host_requirement_checks(void)
{
	if (!test_facility(158)) {
		report_skip("UV Call facility unavailable");
		return false;
	}
	if (!sclp_facilities.has_sief2) {
		report_skip("SIEF2 facility unavailable");
		return false;
	}
	if (get_ram_size() < SNIPPET_PV_MIN_MEM_SIZE) {
		report_skip("Not enough memory. This test needs about %ld MB of memory",
			    SNIPPET_PV_MIN_MEM_SIZE / SZ_1M);
		return false;
	}

	return true;
}

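/*
 * Test whether the UV call with the given number is flagged as
 * installed in the bitmap returned by the query UV call.
 */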
bool uv_query_test_call(unsigned int nr)
{
	/* Query needs to be called first */
	assert(uvcb_qui.header.rc);
	assert(nr < BITS_PER_LONG * ARRAY_SIZE(uvcb_qui.inst_calls_list));

	return test_bit_inv(nr, uvcb_qui.inst_calls_list);
}

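/*
 * Return a pointer to the cached query UV control block; uv_setup()
 * must have executed the query UV call first.
 */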
const struct uv_cb_qui *uv_get_query_data(void)
{
	/* Query needs to be called first */
	assert(uvcb_qui.header.rc == 1 || uvcb_qui.header.rc == 0x100);

	return &uvcb_qui;
}

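/*
 * Execute the query UV call and cache its result for the other
 * helpers. Returns 0 when the UV-call facility is unavailable and 1
 * otherwise.
 */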
int uv_setup(void)
{
	if (!test_facility(158))
		return 0;

	uv_call(0, (u64)&uvcb_qui);

	assert(uvcb_qui.header.rc == 1 || uvcb_qui.header.rc == 0x100);
	return 1;
}

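/*
 * Donate base storage to the Ultravisor and initialize it, which is
 * the precondition for creating secure configurations and CPUs. Only
 * runs once; subsequent calls return immediately.
 */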
void uv_init(void)
{
	struct uv_cb_init uvcb_init = {
		.header.len = sizeof(uvcb_init),
		.header.cmd = UVC_CMD_INIT_UV,
	};
	static bool initialized;
	int cc;

	/* Let's not do this twice */
	if (initialized)
		return;
	/* Query is done on initialization but let's check anyway */
	assert(uvcb_qui.header.rc == 1 || uvcb_qui.header.rc == 0x100);

	/* The donated storage needs to be above 2GB and aligned to 1MB */
	uv_init_mem = (uint64_t)memalign_pages_flags(HPAGE_SIZE, uvcb_qui.uv_base_stor_len, AREA_NORMAL);
	uvcb_init.stor_origin = uv_init_mem;
	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;

	cc = uv_call(0, (uint64_t)&uvcb_init);
	assert(cc == 0);
	initialized = true;
}

/*
 * Create a new ASCE for the UV config because ASCEs cannot be shared
 * for security reasons. We simply copy the topmost table into a fresh
 * set of allocated pages and use those pages as the ASCE.
 */
static uint64_t create_asce(void)
{
	void *pgd_new, *pgd_old;
	uint64_t asce = stctg(1);

	pgd_new = memalign_pages(PAGE_SIZE, PAGE_SIZE * 4);
	pgd_old = (void *)(asce & PAGE_MASK);

	memcpy(pgd_new, pgd_old, PAGE_SIZE * 4);

	asce = __pa(pgd_new) | ASCE_P | (asce & (ASCE_DT | ASCE_TL));
	return asce;
}

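/*
 * Create a secure configuration and one secure CPU for the given VM
 * and convert its SIE control block to the PV format. The donated
 * storage is sized according to the values reported by the query UV
 * call.
 */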
void uv_create_guest(struct vm *vm)
{
	struct uv_cb_cgc uvcb_cgc = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb_cgc),
	};
	struct uv_cb_csc uvcb_csc = {
		.header.len = sizeof(uvcb_csc),
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.state_origin = (uint64_t)vm->sblk,
		.num = 0,
	};
	unsigned long vsize;
	int cc;

	uvcb_cgc.guest_stor_origin = vm->sblk->mso;
	uvcb_cgc.guest_stor_len = vm->sblk->msl - vm->sblk->mso + SZ_1M;

	/* Config allocation */
	vsize = uvcb_qui.conf_base_virt_stor_len +
		((uvcb_cgc.guest_stor_len / HPAGE_SIZE) * uvcb_qui.conf_virt_var_stor_len);

	vm->uv.conf_base_stor = memalign_pages_flags(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len, 0);
	/*
	 * This allocation needs to be below the maximum guest storage
	 * address, so let's simply place it in physical memory.
	 */
	vm->uv.conf_var_stor = memalign_pages_flags(PAGE_SIZE, vsize, 0);
	uvcb_cgc.conf_base_stor_origin = (uint64_t)vm->uv.conf_base_stor;
	uvcb_cgc.conf_var_stor_origin = (uint64_t)vm->uv.conf_var_stor;

	/* CPU allocation */
	vm->uv.cpu_stor = memalign_pages_flags(PAGE_SIZE, uvcb_qui.cpu_stor_len, 0);
	uvcb_csc.stor_origin = (uint64_t)vm->uv.cpu_stor;

	uvcb_cgc.guest_asce = create_asce();
	/* Remember the ASCE so uv_destroy_guest() can free its tables */
	vm->uv.asce = uvcb_cgc.guest_asce;
	vm->save_area.guest.asce = uvcb_cgc.guest_asce;
	uvcb_cgc.guest_sca = (uint64_t)vm->sca;

	cc = uv_call(0, (uint64_t)&uvcb_cgc);
	assert(!cc);

	vm->uv.vm_handle = uvcb_cgc.guest_handle;
	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
	cc = uv_call(0, (uint64_t)&uvcb_csc);
	vm->uv.vcpu_handle = uvcb_csc.cpu_handle;
	assert(!cc);

	/*
	 * Convert guest to format 4:
	 *
	 *  - Set format 4
	 *  - Write UV handles into sblk
	 *  - Allocate and set SIDA
	 */
	vm->sblk->sdf = 2;
	vm->sblk->sidad = (uint64_t)alloc_page();
	vm->sblk->pv_handle_cpu = uvcb_csc.cpu_handle;
	vm->sblk->pv_handle_config = uvcb_cgc.guest_handle;
}

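/*
 * Destroy the secure CPU and configuration of the given VM, free the
 * storage that was donated for them, and turn the SIE control block
 * back into a non-PV one.
 */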
void uv_destroy_guest(struct vm *vm)
{
	int cc;
	u16 rc, rrc;

	cc = uv_cmd_nodata(vm->sblk->pv_handle_cpu,
			   UVC_CMD_DESTROY_SEC_CPU, &rc, &rrc);
	assert(cc == 0);
	free_page((void *)vm->sblk->sidad);
	free_pages(vm->uv.cpu_stor);

	cc = uv_cmd_nodata(vm->sblk->pv_handle_config,
			   UVC_CMD_DESTROY_SEC_CONF, &rc, &rrc);
	assert(cc == 0);
	free_pages(vm->uv.conf_base_stor);
	free_pages(vm->uv.conf_var_stor);

	free_pages((void *)(vm->uv.asce & PAGE_MASK));
	memset(&vm->uv, 0, sizeof(vm->uv));

	/* Convert the sblk back to non-PV */
	vm->save_area.guest.asce = stctg(1);
	vm->sblk->sdf = 0;
	vm->sblk->sidad = 0;
	vm->sblk->pv_handle_cpu = 0;
	vm->sblk->pv_handle_config = 0;
}

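/*
 * Unpack a guest image into guest memory, one page at a time,
 * starting at the given address. The page offset is passed as the
 * second tweak component for the decryption of each page.
 */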
int uv_unpack(struct vm *vm, uint64_t addr, uint64_t len, uint64_t tweak)
{
	int i, cc = 0;

	for (i = 0; i < len / PAGE_SIZE; i++) {
		cc = uv_unp_page(vm->uv.vm_handle, addr, tweak, i * PAGE_SIZE);
		assert(!cc);
		addr += PAGE_SIZE;
	}
	return cc;
}

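/*
 * Let the Ultravisor verify the unpacked image and set the secure CPU
 * into the operating/load state, after which the guest can be started
 * via SIE.
 */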
void uv_verify_load(struct vm *vm)
{
	uint16_t rc, rrc;
	int cc;

	cc = uv_cmd_nodata(vm->uv.vm_handle, UVC_CMD_VERIFY_IMG, &rc, &rrc);
	assert(!cc);
	cc = uv_set_cpu_state(vm->uv.vcpu_handle, PV_CPU_STATE_OPR_LOAD);
	assert(!cc);
}

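/*
 * Minimal usage sketch for a PV host test (hypothetical test code;
 * the guest setup and image unpacking details depend on the
 * individual test and are elided here):
 *
 *	struct vm vm;
 *
 *	if (!uv_host_requirement_checks())
 *		return;
 *	uv_setup();
 *	uv_init();
 *	...set up vm, its sblk and guest memory...
 *	uv_create_guest(&vm);
 *	...uv_unpack() the encrypted image...
 *	uv_verify_load(&vm);
 *	sie(&vm);
 *	uv_destroy_guest(&vm);
 */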