1 /*
2 * Protected Virtualization functions
3 *
4 * Copyright IBM Corp. 2020
5 * Author(s):
6 * Janosch Frank <frankja@linux.ibm.com>
7 *
8 * This work is licensed under the terms of the GNU GPL, version 2 or (at
9 * your option) any later version. See the COPYING file in the top-level
10 * directory.
11 */
12 #include "qemu/osdep.h"
13
14 #include <linux/kvm.h>
15
16 #include "qemu/units.h"
17 #include "qapi/error.h"
18 #include "qemu/error-report.h"
19 #include "system/kvm.h"
20 #include "system/cpus.h"
21 #include "qom/object_interfaces.h"
22 #include "system/confidential-guest-support.h"
23 #include "hw/s390x/ipl.h"
24 #include "hw/s390x/sclp.h"
25 #include "target/s390x/kvm/kvm_s390x.h"
26 #include "target/s390x/kvm/pv.h"
27
/* Set once s390_pv_query_info() has successfully populated the caches below */
static bool info_valid;
/* Cached KVM_PV_INFO_VM response */
static struct kvm_s390_pv_info_vm info_vm;
/* Cached KVM_PV_INFO_DUMP response (buffer sizes for PV dumping) */
static struct kvm_s390_pv_info_dump info_dump;
31
__s390_pv_cmd(uint32_t cmd,const char * cmdname,void * data,struct S390PVResponse * pv_resp)32 static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
33 struct S390PVResponse *pv_resp)
34 {
35 struct kvm_pv_cmd pv_cmd = {
36 .cmd = cmd,
37 .data = (uint64_t)data,
38 };
39 int rc;
40
41 do {
42 rc = kvm_vm_ioctl(kvm_state, KVM_S390_PV_COMMAND, &pv_cmd);
43 } while (rc == -EINTR);
44
45 if (rc) {
46 error_report("KVM PV command %d (%s) failed: header rc %x rrc %x "
47 "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
48 rc);
49 }
50 if (pv_resp) {
51 pv_resp->cmd = cmd;
52 pv_resp->rc = pv_cmd.rc;
53 pv_resp->rrc = pv_cmd.rrc;
54 }
55 return rc;
56 }
57
/*
 * These macros let us pass the command as a string to the function so
 * we can print its name on an error. The _pv_resp variant additionally
 * forwards the command header rc/rrc codes to the caller.
 */
#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL)
#define s390_pv_cmd_pv_resp(cmd, data, pv_resp) \
    __s390_pv_cmd(cmd, #cmd, data, pv_resp)
65
/*
 * Run a PV command and terminate QEMU if it fails; used for commands
 * that must not fail (e.g. tearing down protected mode).
 */
static void s390_pv_cmd_exit(uint32_t cmd, void *data)
{
    int rc = s390_pv_cmd(cmd, data);

    if (rc) {
        exit(1);
    }
}
72
s390_pv_query_info(void)73 int s390_pv_query_info(void)
74 {
75 struct kvm_s390_pv_info info = {
76 .header.id = KVM_PV_INFO_VM,
77 .header.len_max = sizeof(info.header) + sizeof(info.vm),
78 };
79 int rc;
80
81 /* Info API's first user is dump so they are bundled */
82 if (!kvm_s390_get_protected_dump()) {
83 return 0;
84 }
85
86 rc = s390_pv_cmd(KVM_PV_INFO, &info);
87 if (rc) {
88 error_report("KVM PV INFO cmd %x failed: %s",
89 info.header.id, strerror(-rc));
90 return rc;
91 }
92 memcpy(&info_vm, &info.vm, sizeof(info.vm));
93
94 info.header.id = KVM_PV_INFO_DUMP;
95 info.header.len_max = sizeof(info.header) + sizeof(info.dump);
96 rc = s390_pv_cmd(KVM_PV_INFO, &info);
97 if (rc) {
98 error_report("KVM PV INFO cmd %x failed: %s",
99 info.header.id, strerror(-rc));
100 return rc;
101 }
102
103 memcpy(&info_dump, &info.dump, sizeof(info.dump));
104 info_valid = true;
105
106 return rc;
107 }
108
/* Transition the VM into protected mode. Returns 0 on success. */
int s390_pv_vm_enable(void)
{
    return s390_pv_cmd(KVM_PV_ENABLE, NULL);
}
113
/* Synchronously leave protected mode; exits QEMU if the command fails. */
void s390_pv_vm_disable(void)
{
    s390_pv_cmd_exit(KVM_PV_DISABLE, NULL);
}
118
/*
 * Thread worker for asynchronous protected-mode teardown; exits QEMU
 * on failure. The argument p is unused (required by the thread API).
 */
static void *s390_pv_do_unprot_async_fn(void *p)
{
    s390_pv_cmd_exit(KVM_PV_ASYNC_CLEANUP_PERFORM, NULL);
    return NULL;
}
124
/*
 * Try to start an asynchronous teardown of the protected VM: prepare
 * the cleanup in KVM, then perform it in a detached worker thread.
 *
 * Returns true if the asynchronous teardown was started, false if it
 * is unavailable (missing KVM capability, VM too small, or the prepare
 * command failed).
 */
bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
{
    /*
     * t is only needed to create the thread; once qemu_thread_create
     * returns, it can safely be discarded.
     */
    QemuThread t;

    /*
     * If the feature is not present or if the VM is not larger than 2 GiB,
     * KVM_PV_ASYNC_CLEANUP_PREPARE will fail; no point in attempting it.
     */
    if (s390_get_memory_limit(ms) <= 2 * GiB ||
        !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) {
        return false;
    }
    if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) {
        return false;
    }

    qemu_thread_create(&t, "async_cleanup", s390_pv_do_unprot_async_fn, NULL,
                       QEMU_THREAD_DETACHED);

    return true;
}
150
151 #define UV_RC_SSC_INVAL_HOSTKEY 0x0108
s390_pv_set_sec_parms(uint64_t origin,uint64_t length,struct S390PVResponse * pv_resp,Error ** errp)152 int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
153 struct S390PVResponse *pv_resp, Error **errp)
154 {
155 int ret;
156 struct kvm_s390_pv_sec_parm args = {
157 .origin = origin,
158 .length = length,
159 };
160
161 ret = s390_pv_cmd_pv_resp(KVM_PV_SET_SEC_PARMS, &args, pv_resp);
162 if (ret) {
163 error_setg(errp, "Failed to set secure execution parameters");
164 if (pv_resp->rc == UV_RC_SSC_INVAL_HOSTKEY) {
165 error_append_hint(errp, "Please check whether the image is "
166 "correctly encrypted for this host\n");
167 }
168 }
169
170 return ret;
171 }
172
173 /*
174 * Called for each component in the SE type IPL parameter block 0.
175 */
s390_pv_unpack(uint64_t addr,uint64_t size,uint64_t tweak,struct S390PVResponse * pv_resp)176 int s390_pv_unpack(uint64_t addr, uint64_t size,
177 uint64_t tweak, struct S390PVResponse *pv_resp)
178 {
179 struct kvm_s390_pv_unp args = {
180 .addr = addr,
181 .size = size,
182 .tweak = tweak,
183 };
184
185 return s390_pv_cmd_pv_resp(KVM_PV_UNPACK, &args, pv_resp);
186 }
187
/* Prepare the protected VM for a reset; exits QEMU if the command fails. */
void s390_pv_prep_reset(void)
{
    s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL);
}
192
/*
 * Verify the loaded and unpacked image. Returns 0 on success; on
 * failure pv_resp carries the rc/rrc codes for error injection.
 */
int s390_pv_verify(struct S390PVResponse *pv_resp)
{
    return s390_pv_cmd_pv_resp(KVM_PV_VERIFY, NULL, pv_resp);
}
197
/* Unshare all guest pages; exits QEMU if the command fails. */
void s390_pv_unshare(void)
{
    s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL);
}
202
/*
 * Report a failure to enter protected mode back to the guest: pack the
 * failed PV command, its rc/rrc codes and DIAG_308_RC_INVAL_FOR_PV into
 * the register following r1 (r1 + 1).
 */
void s390_pv_inject_reset_error(CPUState *cs,
                                struct S390PVResponse pv_resp)
{
    /* Register number encoded in bits 8-11 of the intercepted insn (ipa) */
    int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4;
    CPUS390XState *env = &S390_CPU(cs)->env;

    /*
     * Overlay of the four 16-bit response fields onto a single 64-bit
     * register image. NOTE(review): field order assumes the guest-visible
     * (big-endian s390x host) layout — do not reorder.
     */
    union {
        struct {
            uint16_t pv_cmd;
            uint16_t pv_rrc;
            uint16_t pv_rc;
            uint16_t diag_rc;
        };
        uint64_t regs;
    } resp = {
        .pv_cmd = pv_resp.cmd,
        .pv_rrc = pv_resp.rrc,
        .pv_rc = pv_resp.rc,
        .diag_rc = DIAG_308_RC_INVAL_FOR_PV
    };

    /* Report that we are unable to enter protected mode */
    env->regs[r1 + 1] = resp.regs;
}
227
/* Buffer size needed to dump one vCPU; valid after s390_pv_query_info(). */
uint64_t kvm_s390_pv_dmp_get_size_cpu(void)
{
    return info_dump.dump_cpu_buffer_len;
}
232
/* Buffer size needed for the dump completion data; valid after query_info. */
uint64_t kvm_s390_pv_dmp_get_size_completion_data(void)
{
    return info_dump.dump_config_finalize_len;
}
237
/* Memory-state buffer size per 1M of guest memory; valid after query_info. */
uint64_t kvm_s390_pv_dmp_get_size_mem_state(void)
{
    return info_dump.dump_config_mem_buffer_per_1m;
}
242
/* True once s390_pv_query_info() has populated the cached PV info. */
bool kvm_s390_pv_info_basic_valid(void)
{
    return info_valid;
}
247
s390_pv_dump_cmd(uint64_t subcmd,uint64_t uaddr,uint64_t gaddr,uint64_t len)248 static int s390_pv_dump_cmd(uint64_t subcmd, uint64_t uaddr, uint64_t gaddr,
249 uint64_t len)
250 {
251 struct kvm_s390_pv_dmp dmp = {
252 .subcmd = subcmd,
253 .buff_addr = uaddr,
254 .buff_len = len,
255 .gaddr = gaddr,
256 };
257 int ret;
258
259 ret = s390_pv_cmd(KVM_PV_DUMP, (void *)&dmp);
260 if (ret) {
261 error_report("KVM DUMP command %ld failed", subcmd);
262 }
263 return ret;
264 }
265
/*
 * Dump the state of one protected vCPU into buff, which must hold at
 * least kvm_s390_pv_dmp_get_size_cpu() bytes. Issued on the vCPU fd
 * rather than the VM fd.
 */
int kvm_s390_dump_cpu(S390CPU *cpu, void *buff)
{
    struct kvm_s390_pv_dmp dmp = {
        .subcmd = KVM_PV_DUMP_CPU,
        .buff_addr = (uint64_t)buff,
        .buff_len = info_dump.dump_cpu_buffer_len,
        .gaddr = 0,
    };
    struct kvm_pv_cmd pv = {
        .cmd = KVM_PV_DUMP,
        .data = (uint64_t)&dmp,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_S390_PV_CPU_COMMAND, &pv);
}
281
/* Start a PV dump. Returns 0 on success. */
int kvm_s390_dump_init(void)
{
    return s390_pv_dump_cmd(KVM_PV_DUMP_INIT, 0, 0, 0);
}
286
/*
 * Dump the memory state for guest address gaddr into dest (len bytes).
 * Returns 0 on success.
 */
int kvm_s390_dump_mem_state(uint64_t gaddr, size_t len, void *dest)
{
    return s390_pv_dump_cmd(KVM_PV_DUMP_CONFIG_STOR_STATE, (uint64_t)dest,
                            gaddr, len);
}
292
/*
 * Finalize the dump and fetch the completion data into buff, which must
 * hold at least kvm_s390_pv_dmp_get_size_completion_data() bytes.
 */
int kvm_s390_dump_completion_data(void *buff)
{
    return s390_pv_dump_cmd(KVM_PV_DUMP_COMPLETE, (uint64_t)buff, 0,
                            info_dump.dump_config_finalize_len);
}
298
#define TYPE_S390_PV_GUEST "s390-pv-guest"
OBJECT_DECLARE_SIMPLE_TYPE(S390PVGuest, S390_PV_GUEST)

/**
 * S390PVGuest:
 *
 * The S390PVGuest object is basically a dummy used to tell the
 * confidential guest support system to use s390's PV mechanism.
 *
 * # $QEMU \
 *         -object s390-pv-guest,id=pv0 \
 *         -machine ...,confidential-guest-support=pv0
 */
struct S390PVGuest {
    ConfidentialGuestSupport parent_obj;
};

typedef struct S390PVGuestClass S390PVGuestClass;

/* No PV-specific class members beyond the CGS base class. */
struct S390PVGuestClass {
    ConfidentialGuestSupportClass parent_class;
};
321
322 /*
323 * If protected virtualization is enabled, the amount of data that the
324 * Read SCP Info Service Call can use is limited to one page. The
325 * available space also depends on the Extended-Length SCCB (ELS)
326 * feature which can take more buffer space to store feature
327 * information. This impacts the maximum number of CPUs supported in
328 * the machine.
329 */
s390_pv_get_max_cpus(void)330 static uint32_t s390_pv_get_max_cpus(void)
331 {
332 int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ?
333 offsetof(ReadInfo, entries) : SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET;
334
335 return (TARGET_PAGE_SIZE - offset_cpu) / sizeof(CPUEntry);
336 }
337
s390_pv_check_cpus(Error ** errp)338 static bool s390_pv_check_cpus(Error **errp)
339 {
340 MachineState *ms = MACHINE(qdev_get_machine());
341 uint32_t pv_max_cpus = s390_pv_get_max_cpus();
342
343 if (ms->smp.max_cpus > pv_max_cpus) {
344 error_setg(errp, "Protected VMs support a maximum of %d CPUs",
345 pv_max_cpus);
346 return false;
347 }
348
349 return true;
350 }
351
/* CGS check hook: currently only validates the CPU count limit. */
static bool s390_pv_guest_check(ConfidentialGuestSupport *cgs, Error **errp)
{
    return s390_pv_check_cpus(errp);
}
356
/*
 * CGS kvm_init hook: validate that this configuration can run protected
 * guests and mark the CGS object ready.
 *
 * Returns 0 on success (including when cgs is not an S390PVGuest, in
 * which case there is nothing to do), -1 with errp set on error.
 */
static int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    /* Ignore confidential-guest objects of other types. */
    if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) {
        return 0;
    }

    if (!kvm_enabled()) {
        error_setg(errp, "Protected Virtualization requires KVM");
        return -1;
    }

    if (!s390_has_feat(S390_FEAT_UNPACK)) {
        error_setg(errp,
                   "CPU model does not support Protected Virtualization");
        return -1;
    }

    if (!s390_pv_guest_check(cgs, errp)) {
        return -1;
    }

    cgs->ready = true;

    return 0;
}
382
/* Register the s390-pv-guest QOM type, implementing TYPE_USER_CREATABLE. */
OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest,
                                   s390_pv_guest,
                                   S390_PV_GUEST,
                                   CONFIDENTIAL_GUEST_SUPPORT,
                                   { TYPE_USER_CREATABLE },
                                   { NULL })
389
/* Wire the PV-specific kvm_init hook into the CGS class. */
static void s390_pv_guest_class_init(ObjectClass *oc, const void *data)
{
    ConfidentialGuestSupportClass *cgs_class =
        CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);

    cgs_class->kvm_init = s390_pv_kvm_init;
}
396
/* No per-instance state to initialize. */
static void s390_pv_guest_init(Object *obj)
{
}
400
/* No per-instance state to release. */
static void s390_pv_guest_finalize(Object *obj)
{
}
404