1 /*
2 * QEMU SEV support
3 *
4 * Copyright Advanced Micro Devices 2016-2018
5 *
6 * Author:
7 * Brijesh Singh <brijesh.singh@amd.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15
16 #include <linux/kvm.h>
17 #include <linux/kvm_para.h>
18 #include <linux/psp-sev.h>
19
20 #include <sys/ioctl.h>
21
22 #include "qapi/error.h"
23 #include "qom/object_interfaces.h"
24 #include "qemu/base64.h"
25 #include "qemu/module.h"
26 #include "qemu/uuid.h"
27 #include "qemu/error-report.h"
28 #include "crypto/hash.h"
29 #include "exec/target_page.h"
30 #include "system/kvm.h"
31 #include "kvm/kvm_i386.h"
32 #include "sev.h"
33 #include "system/system.h"
34 #include "system/runstate.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qom/object.h"
38 #include "monitor/monitor.h"
39 #include "monitor/hmp-target.h"
40 #include "qapi/qapi-commands-misc-i386.h"
41 #include "confidential-guest.h"
42 #include "hw/i386/pc.h"
43 #include "system/address-spaces.h"
44 #include "qemu/queue.h"
45
46 OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON)
47 OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST)
48 OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST)
49
/* hard-coded SHA-256 digest size */
51 #define HASH_SIZE 32
52
53 typedef struct QEMU_PACKED SevHashTableEntry {
54 QemuUUID guid;
55 uint16_t len;
56 uint8_t hash[HASH_SIZE];
57 } SevHashTableEntry;
58
59 typedef struct QEMU_PACKED SevHashTable {
60 QemuUUID guid;
61 uint16_t len;
62 SevHashTableEntry cmdline;
63 SevHashTableEntry initrd;
64 SevHashTableEntry kernel;
65 } SevHashTable;
66
67 /*
68 * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
69 * 16 bytes.
70 */
71 typedef struct QEMU_PACKED PaddedSevHashTable {
72 SevHashTable ht;
73 uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
74 } PaddedSevHashTable;
75
76 QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);
77
78 #define SEV_INFO_BLOCK_GUID "00f771de-1a7e-4fcb-890e-68c77e2fb44e"
79 typedef struct __attribute__((__packed__)) SevInfoBlock {
80 /* SEV-ES Reset Vector Address */
81 uint32_t reset_addr;
82 } SevInfoBlock;
83
84 #define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454"
85 typedef struct QEMU_PACKED SevHashTableDescriptor {
86 /* SEV hash table area guest address */
87 uint32_t base;
88 /* SEV hash table area size (in bytes) */
89 uint32_t size;
90 } SevHashTableDescriptor;
91
92 struct SevCommonState {
93 X86ConfidentialGuest parent_obj;
94
95 int kvm_type;
96
97 /* configuration parameters */
98 char *sev_device;
99 uint32_t cbitpos;
100 uint32_t reduced_phys_bits;
101 bool kernel_hashes;
102
103 /* runtime state */
104 uint8_t api_major;
105 uint8_t api_minor;
106 uint8_t build_id;
107 int sev_fd;
108 SevState state;
109
110 uint32_t reset_cs;
111 uint32_t reset_ip;
112 bool reset_data_valid;
113 };
114
115 struct SevCommonStateClass {
116 X86ConfidentialGuestClass parent_class;
117
118 /* public */
119 bool (*build_kernel_loader_hashes)(SevCommonState *sev_common,
120 SevHashTableDescriptor *area,
121 SevKernelLoaderContext *ctx,
122 Error **errp);
123 int (*launch_start)(SevCommonState *sev_common);
124 void (*launch_finish)(SevCommonState *sev_common);
125 int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa, uint8_t *ptr, size_t len);
126 int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
127 };
128
129 /**
130 * SevGuestState:
131 *
132 * The SevGuestState object is used for creating and managing a SEV
133 * guest.
134 *
135 * # $QEMU \
136 * -object sev-guest,id=sev0 \
137 * -machine ...,memory-encryption=sev0
138 */
139 struct SevGuestState {
140 SevCommonState parent_obj;
141 gchar *measurement;
142
143 /* configuration parameters */
144 uint32_t handle;
145 uint32_t policy;
146 char *dh_cert_file;
147 char *session_file;
148 OnOffAuto legacy_vm_type;
149 };
150
151 struct SevSnpGuestState {
152 SevCommonState parent_obj;
153
154 /* configuration parameters */
155 char *guest_visible_workarounds;
156 char *id_block_base64;
157 uint8_t *id_block;
158 char *id_auth_base64;
159 uint8_t *id_auth;
160 char *host_data;
161
162 struct kvm_sev_snp_launch_start kvm_start_conf;
163 struct kvm_sev_snp_launch_finish kvm_finish_conf;
164
165 uint32_t kernel_hashes_offset;
166 PaddedSevHashTable *kernel_hashes_data;
167 };
168
169 #define DEFAULT_GUEST_POLICY 0x1 /* disable debug */
170 #define DEFAULT_SEV_DEVICE "/dev/sev"
171 #define DEFAULT_SEV_SNP_POLICY 0x30000
172
173 typedef struct SevLaunchUpdateData {
174 QTAILQ_ENTRY(SevLaunchUpdateData) next;
175 hwaddr gpa;
176 void *hva;
177 size_t len;
178 int type;
179 } SevLaunchUpdateData;
180
181 static QTAILQ_HEAD(, SevLaunchUpdateData) launch_update;
182
183 static Error *sev_mig_blocker;
184
185 static const char *const sev_fw_errlist[] = {
186 [SEV_RET_SUCCESS] = "",
187 [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid",
188 [SEV_RET_INVALID_GUEST_STATE] = "Guest state is invalid",
189 [SEV_RET_INAVLID_CONFIG] = "Platform configuration is invalid",
190 [SEV_RET_INVALID_LEN] = "Buffer too small",
191 [SEV_RET_ALREADY_OWNED] = "Platform is already owned",
192 [SEV_RET_INVALID_CERTIFICATE] = "Certificate is invalid",
193 [SEV_RET_POLICY_FAILURE] = "Policy is not allowed",
194 [SEV_RET_INACTIVE] = "Guest is not active",
195 [SEV_RET_INVALID_ADDRESS] = "Invalid address",
196 [SEV_RET_BAD_SIGNATURE] = "Bad signature",
197 [SEV_RET_BAD_MEASUREMENT] = "Bad measurement",
198 [SEV_RET_ASID_OWNED] = "ASID is already owned",
199 [SEV_RET_INVALID_ASID] = "Invalid ASID",
200 [SEV_RET_WBINVD_REQUIRED] = "WBINVD is required",
201 [SEV_RET_DFFLUSH_REQUIRED] = "DF_FLUSH is required",
202 [SEV_RET_INVALID_GUEST] = "Guest handle is invalid",
203 [SEV_RET_INVALID_COMMAND] = "Invalid command",
204 [SEV_RET_ACTIVE] = "Guest is active",
205 [SEV_RET_HWSEV_RET_PLATFORM] = "Hardware error",
206 [SEV_RET_HWSEV_RET_UNSAFE] = "Hardware unsafe",
207 [SEV_RET_UNSUPPORTED] = "Feature not supported",
208 [SEV_RET_INVALID_PARAM] = "Invalid parameter",
209 [SEV_RET_RESOURCE_LIMIT] = "Required firmware resource depleted",
210 [SEV_RET_SECURE_DATA_INVALID] = "Part-specific integrity check failure",
211 };
212
213 #define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
214
215 #define SNP_CPUID_FUNCTION_MAXCOUNT 64
216 #define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
217
218 typedef struct {
219 uint32_t eax_in;
220 uint32_t ecx_in;
221 uint64_t xcr0_in;
222 uint64_t xss_in;
223 uint32_t eax;
224 uint32_t ebx;
225 uint32_t ecx;
226 uint32_t edx;
227 uint64_t reserved;
228 } __attribute__((packed)) SnpCpuidFunc;
229
230 typedef struct {
231 uint32_t count;
232 uint32_t reserved1;
233 uint64_t reserved2;
234 SnpCpuidFunc entries[SNP_CPUID_FUNCTION_MAXCOUNT];
235 } __attribute__((packed)) SnpCpuidInfo;
236
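/*
 * Issue a SEV guest command to the in-kernel driver: the command and its
 * data buffer are wrapped in a struct kvm_sev_cmd and submitted via the
 * KVM_MEMORY_ENCRYPT_OP vm ioctl.  The firmware status code is returned
 * through *error when the caller provides it.
 */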
237 static int
sev_ioctl(int fd, int cmd, void *data, int *error)
239 {
240 int r;
241 struct kvm_sev_cmd input;
242
243 memset(&input, 0x0, sizeof(input));
244
245 input.id = cmd;
246 input.sev_fd = fd;
247 input.data = (uintptr_t)data;
248
249 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);
250
251 if (error) {
252 *error = input.error;
253 }
254
255 return r;
256 }
257
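/*
 * Issue a SEV platform command directly to the /dev/sev device using the
 * SEV_ISSUE_CMD ioctl, returning the firmware status code through *error.
 */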
258 static int
sev_platform_ioctl(int fd, int cmd, void *data, int *error)
260 {
261 int r;
262 struct sev_issue_cmd arg;
263
264 arg.cmd = cmd;
265 arg.data = (unsigned long)data;
266 r = ioctl(fd, SEV_ISSUE_CMD, &arg);
267 if (error) {
268 *error = arg.error;
269 }
270
271 return r;
272 }
273
274 static const char *
fw_error_to_str(int code)
276 {
277 if (code < 0 || code >= SEV_FW_MAX_ERROR) {
278 return "unknown error";
279 }
280
281 return sev_fw_errlist[code];
282 }
283
284 static bool
sev_check_state(const SevCommonState *sev_common, SevState state)
286 {
287 assert(sev_common);
288 return sev_common->state == state ? true : false;
289 }
290
291 static void
sev_set_guest_state(SevCommonState *sev_common, SevState new_state)
293 {
294 assert(new_state < SEV_STATE__MAX);
295 assert(sev_common);
296
297 trace_kvm_sev_change_state(SevState_str(sev_common->state),
298 SevState_str(new_state));
299 sev_common->state = new_state;
300 }
301
302 static void
sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
                    size_t max_size)
305 {
306 int r;
307 struct kvm_enc_region range;
308 ram_addr_t offset;
309 MemoryRegion *mr;
310
311 /*
312 * The RAM device presents a memory region that should be treated
     * as an IO region and should not be pinned.
314 */
315 mr = memory_region_from_host(host, &offset);
316 if (mr && memory_region_is_ram_device(mr)) {
317 return;
318 }
319
320 range.addr = (uintptr_t)host;
321 range.size = max_size;
322
323 trace_kvm_memcrypt_register_region(host, max_size);
324 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
325 if (r) {
326 error_report("%s: failed to register region (%p+%#zx) error '%s'",
327 __func__, host, max_size, strerror(errno));
328 exit(1);
329 }
330 }
331
332 static void
sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
                      size_t max_size)
335 {
336 int r;
337 struct kvm_enc_region range;
338 ram_addr_t offset;
339 MemoryRegion *mr;
340
341 /*
342 * The RAM device presents a memory region that should be treated
     * as an IO region and should not have been pinned.
344 */
345 mr = memory_region_from_host(host, &offset);
346 if (mr && memory_region_is_ram_device(mr)) {
347 return;
348 }
349
350 range.addr = (uintptr_t)host;
351 range.size = max_size;
352
353 trace_kvm_memcrypt_unregister_region(host, max_size);
354 r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
355 if (r) {
356 error_report("%s: failed to unregister region (%p+%#zx)",
357 __func__, host, max_size);
358 }
359 }
360
361 static struct RAMBlockNotifier sev_ram_notifier = {
362 .ram_block_added = sev_ram_block_added,
363 .ram_block_removed = sev_ram_block_removed,
364 };
365
366 bool
sev_enabled(void)
368 {
369 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
370
371 return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON);
372 }
373
374 bool
sev_snp_enabled(void)
376 {
377 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
378
379 return !!object_dynamic_cast(OBJECT(cgs), TYPE_SEV_SNP_GUEST);
380 }
381
382 bool
sev_es_enabled(void)
384 {
385 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
386
387 return sev_snp_enabled() ||
388 (sev_enabled() && SEV_GUEST(cgs)->policy & SEV_POLICY_ES);
389 }
390
391 uint32_t
sev_get_cbit_position(void)
393 {
394 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
395
396 return sev_common ? sev_common->cbitpos : 0;
397 }
398
399 uint32_t
sev_get_reduced_phys_bits(void)
401 {
402 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
403
404 return sev_common ? sev_common->reduced_phys_bits : 0;
405 }
406
static SevInfo *sev_get_info(void)
408 {
409 SevInfo *info;
410 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
411
412 info = g_new0(SevInfo, 1);
413 info->enabled = sev_enabled();
414
415 if (info->enabled) {
416 info->api_major = sev_common->api_major;
417 info->api_minor = sev_common->api_minor;
418 info->build_id = sev_common->build_id;
419 info->state = sev_common->state;
420
421 if (sev_snp_enabled()) {
422 info->sev_type = SEV_GUEST_TYPE_SEV_SNP;
423 info->u.sev_snp.snp_policy =
424 object_property_get_uint(OBJECT(sev_common), "policy", NULL);
425 } else {
426 info->sev_type = SEV_GUEST_TYPE_SEV;
427 info->u.sev.handle = SEV_GUEST(sev_common)->handle;
428 info->u.sev.policy =
429 (uint32_t)object_property_get_uint(OBJECT(sev_common),
430 "policy", NULL);
431 }
432 }
433
434 return info;
435 }
436
SevInfo *qmp_query_sev(Error **errp)
438 {
439 SevInfo *info;
440
441 info = sev_get_info();
442 if (!info) {
443 error_setg(errp, "SEV feature is not available");
444 return NULL;
445 }
446
447 return info;
448 }
449
void hmp_info_sev(Monitor *mon, const QDict *qdict)
451 {
452 SevInfo *info = sev_get_info();
453
454 if (!info || !info->enabled) {
455 monitor_printf(mon, "SEV is not enabled\n");
456 goto out;
457 }
458
459 monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
460 monitor_printf(mon, "state: %s\n", SevState_str(info->state));
461 monitor_printf(mon, "build: %d\n", info->build_id);
462 monitor_printf(mon, "api version: %d.%d\n", info->api_major,
463 info->api_minor);
464
465 if (sev_snp_enabled()) {
466 monitor_printf(mon, "debug: %s\n",
467 info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on"
468 : "off");
469 monitor_printf(mon, "SMT allowed: %s\n",
470 info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? "on"
471 : "off");
472 } else {
473 monitor_printf(mon, "handle: %d\n", info->u.sev.handle);
474 monitor_printf(mon, "debug: %s\n",
475 info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on");
476 monitor_printf(mon, "key-sharing: %s\n",
477 info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on");
478 }
479
480 out:
481 qapi_free_SevInfo(info);
482 }
483
484 static int
sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
                 size_t *cert_chain_len, Error **errp)
487 {
488 guchar *pdh_data = NULL;
489 guchar *cert_chain_data = NULL;
490 struct sev_user_data_pdh_cert_export export = {};
491 int err, r;
492
493 /* query the certificate length */
494 r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
495 if (r < 0) {
496 if (err != SEV_RET_INVALID_LEN) {
497 error_setg(errp, "SEV: Failed to export PDH cert"
498 " ret=%d fw_err=%d (%s)",
499 r, err, fw_error_to_str(err));
500 return 1;
501 }
502 }
503
504 pdh_data = g_new(guchar, export.pdh_cert_len);
505 cert_chain_data = g_new(guchar, export.cert_chain_len);
506 export.pdh_cert_address = (unsigned long)pdh_data;
507 export.cert_chain_address = (unsigned long)cert_chain_data;
508
509 r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
510 if (r < 0) {
511 error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)",
512 r, err, fw_error_to_str(err));
513 goto e_free;
514 }
515
516 *pdh = pdh_data;
517 *pdh_len = export.pdh_cert_len;
518 *cert_chain = cert_chain_data;
519 *cert_chain_len = export.cert_chain_len;
520 return 0;
521
522 e_free:
523 g_free(pdh_data);
524 g_free(cert_chain_data);
525 return 1;
526 }
527
static int sev_get_cpu0_id(int fd, guchar **id, size_t *id_len, Error **errp)
529 {
530 guchar *id_data;
531 struct sev_user_data_get_id2 get_id2 = {};
532 int err, r;
533
534 /* query the ID length */
535 r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
536 if (r < 0 && err != SEV_RET_INVALID_LEN) {
537 error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
538 r, err, fw_error_to_str(err));
539 return 1;
540 }
541
542 id_data = g_new(guchar, get_id2.length);
543 get_id2.address = (unsigned long)id_data;
544
545 r = sev_platform_ioctl(fd, SEV_GET_ID2, &get_id2, &err);
546 if (r < 0) {
547 error_setg(errp, "SEV: Failed to get ID ret=%d fw_err=%d (%s)",
548 r, err, fw_error_to_str(err));
549 goto err;
550 }
551
552 *id = id_data;
553 *id_len = get_id2.length;
554 return 0;
555
556 err:
557 g_free(id_data);
558 return 1;
559 }
560
static SevCapability *sev_get_capabilities(Error **errp)
562 {
563 SevCapability *cap = NULL;
564 guchar *pdh_data = NULL;
565 guchar *cert_chain_data = NULL;
566 guchar *cpu0_id_data = NULL;
567 size_t pdh_len = 0, cert_chain_len = 0, cpu0_id_len = 0;
568 uint32_t ebx;
569 int fd;
570 SevCommonState *sev_common;
571 char *sev_device;
572
573 if (!kvm_enabled()) {
574 error_setg(errp, "KVM not enabled");
575 return NULL;
576 }
577 if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
578 error_setg(errp, "SEV is not enabled in KVM");
579 return NULL;
580 }
581
582 sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
583 if (sev_common) {
584 sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
585 &error_abort);
586 } else {
587 sev_device = g_strdup(DEFAULT_SEV_DEVICE);
588 }
589
590 fd = open(sev_device, O_RDWR);
591 if (fd < 0) {
592 error_setg_errno(errp, errno, "SEV: Failed to open %s",
593 sev_device);
594 g_free(sev_device);
595 return NULL;
596 }
597 g_free(sev_device);
598
599 if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
600 &cert_chain_data, &cert_chain_len, errp)) {
601 goto out;
602 }
603
604 if (sev_get_cpu0_id(fd, &cpu0_id_data, &cpu0_id_len, errp)) {
605 goto out;
606 }
607
608 cap = g_new0(SevCapability, 1);
609 cap->pdh = g_base64_encode(pdh_data, pdh_len);
610 cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
611 cap->cpu0_id = g_base64_encode(cpu0_id_data, cpu0_id_len);
612
613 host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
614 cap->cbitpos = ebx & 0x3f;
615
616 /*
     * When the SEV feature is enabled, we lose one bit of guest physical
     * addressing.
619 */
620 cap->reduced_phys_bits = 1;
621
622 out:
623 g_free(cpu0_id_data);
624 g_free(pdh_data);
625 g_free(cert_chain_data);
626 close(fd);
627 return cap;
628 }
629
SevCapability *qmp_query_sev_capabilities(Error **errp)
631 {
632 return sev_get_capabilities(errp);
633 }
634
635 static OvmfSevMetadata *ovmf_sev_metadata_table;
636
637 #define OVMF_SEV_META_DATA_GUID "dc886566-984a-4798-A75e-5585a7bf67cc"
638 typedef struct __attribute__((__packed__)) OvmfSevMetadataOffset {
639 uint32_t offset;
640 } OvmfSevMetadataOffset;
641
OvmfSevMetadata *pc_system_get_ovmf_sev_metadata_ptr(void)
643 {
644 return ovmf_sev_metadata_table;
645 }
646
void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size)
648 {
649 OvmfSevMetadata *metadata;
650 OvmfSevMetadataOffset *data;
651
652 if (!pc_system_ovmf_table_find(OVMF_SEV_META_DATA_GUID, (uint8_t **)&data,
653 NULL)) {
654 return;
655 }
656
657 metadata = (OvmfSevMetadata *)(flash_ptr + flash_size - data->offset);
658 if (memcmp(metadata->signature, "ASEV", 4) != 0 ||
659 metadata->len < sizeof(OvmfSevMetadata) ||
660 metadata->len > flash_size - data->offset) {
661 return;
662 }
663
664 ovmf_sev_metadata_table = g_memdup2(metadata, metadata->len);
665 }
666
static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
                                                        Error **errp)
669 {
670 struct kvm_sev_attestation_report input = {};
671 SevAttestationReport *report = NULL;
672 SevCommonState *sev_common;
673 g_autofree guchar *data = NULL;
674 g_autofree guchar *buf = NULL;
675 gsize len;
676 int err = 0, ret;
677
678 if (!sev_enabled()) {
679 error_setg(errp, "SEV is not enabled");
680 return NULL;
681 }
682
    /* let's decode the mnonce string */
684 buf = g_base64_decode(mnonce, &len);
685 if (!buf) {
686 error_setg(errp, "SEV: failed to decode mnonce input");
687 return NULL;
688 }
689
690 /* verify the input mnonce length */
691 if (len != sizeof(input.mnonce)) {
692 error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
693 sizeof(input.mnonce), len);
694 return NULL;
695 }
696
697 sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
698
699 /* Query the report length */
700 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
701 &input, &err);
702 if (ret < 0) {
703 if (err != SEV_RET_INVALID_LEN) {
704 error_setg(errp, "SEV: Failed to query the attestation report"
705 " length ret=%d fw_err=%d (%s)",
706 ret, err, fw_error_to_str(err));
707 return NULL;
708 }
709 }
710
711 data = g_malloc(input.len);
712 input.uaddr = (unsigned long)data;
713 memcpy(input.mnonce, buf, sizeof(input.mnonce));
714
715 /* Query the report */
716 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
717 &input, &err);
718 if (ret) {
719 error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
720 " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
721 return NULL;
722 }
723
724 report = g_new0(SevAttestationReport, 1);
725 report->data = g_base64_encode(data, input.len);
726
727 trace_kvm_sev_attestation_report(mnonce, report->data);
728
729 return report;
730 }
731
SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
                                                       Error **errp)
734 {
735 return sev_get_attestation_report(mnonce, errp);
736 }
737
738 static int
sev_read_file_base64(const char *filename, guchar **data, gsize *len)
740 {
741 gsize sz;
742 g_autofree gchar *base64 = NULL;
743 GError *error = NULL;
744
745 if (!g_file_get_contents(filename, &base64, &sz, &error)) {
746 error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
747 g_error_free(error);
748 return -1;
749 }
750
751 *data = g_base64_decode(base64, len);
752 return 0;
753 }
754
755 static int
sev_snp_launch_start(SevCommonState *sev_common)
757 {
758 int fw_error, rc;
759 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
760 struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
761
762 trace_kvm_sev_snp_launch_start(start->policy,
763 sev_snp_guest->guest_visible_workarounds);
764
765 if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
766 return 1;
767 }
768
769 rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_START,
770 start, &fw_error);
771 if (rc < 0) {
772 error_report("%s: SNP_LAUNCH_START ret=%d fw_error=%d '%s'",
773 __func__, rc, fw_error, fw_error_to_str(fw_error));
774 return 1;
775 }
776
777 QTAILQ_INIT(&launch_update);
778
779 sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
780
781 return 0;
782 }
783
784 static int
sev_launch_start(SevCommonState *sev_common)
786 {
787 gsize sz;
788 int ret = 1;
789 int fw_error, rc;
790 SevGuestState *sev_guest = SEV_GUEST(sev_common);
791 struct kvm_sev_launch_start start = {
792 .handle = sev_guest->handle, .policy = sev_guest->policy
793 };
794 guchar *session = NULL, *dh_cert = NULL;
795
796 if (sev_guest->session_file) {
797 if (sev_read_file_base64(sev_guest->session_file, &session, &sz) < 0) {
798 goto out;
799 }
800 start.session_uaddr = (unsigned long)session;
801 start.session_len = sz;
802 }
803
804 if (sev_guest->dh_cert_file) {
805 if (sev_read_file_base64(sev_guest->dh_cert_file, &dh_cert, &sz) < 0) {
806 goto out;
807 }
808 start.dh_uaddr = (unsigned long)dh_cert;
809 start.dh_len = sz;
810 }
811
812 trace_kvm_sev_launch_start(start.policy, session, dh_cert);
813 rc = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
814 if (rc < 0) {
815 error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
                __func__, rc, fw_error, fw_error_to_str(fw_error));
817 goto out;
818 }
819
820 sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_UPDATE);
821 sev_guest->handle = start.handle;
822 ret = 0;
823
824 out:
825 g_free(session);
826 g_free(dh_cert);
827 return ret;
828 }
829
830 static void
sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
                                SnpCpuidInfo *new)
833 {
834 size_t i;
835
836 if (old->count != new->count) {
837 error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
838 "provided: %d, expected: %d", old->count, new->count);
839 return;
840 }
841
842 for (i = 0; i < old->count; i++) {
843 SnpCpuidFunc *old_func, *new_func;
844
845 old_func = &old->entries[i];
846 new_func = &new->entries[i];
847
848 if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
849 error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
850 "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
851 "expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
852 old_func->eax_in, old_func->ecx_in,
853 old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
854 new_func->eax, new_func->ebx, new_func->ecx, new_func->edx);
855 }
856 }
857 }
858
859 static const char *
snp_page_type_to_str(int type)
861 {
862 switch (type) {
863 case KVM_SEV_SNP_PAGE_TYPE_NORMAL: return "Normal";
864 case KVM_SEV_SNP_PAGE_TYPE_ZERO: return "Zero";
865 case KVM_SEV_SNP_PAGE_TYPE_UNMEASURED: return "Unmeasured";
866 case KVM_SEV_SNP_PAGE_TYPE_SECRETS: return "Secrets";
867 case KVM_SEV_SNP_PAGE_TYPE_CPUID: return "Cpuid";
868 default: return "unknown";
869 }
870 }
871
872 static int
sev_snp_launch_update(SevSnpGuestState *sev_snp_guest,
                      SevLaunchUpdateData *data)
875 {
876 int ret, fw_error;
877 SnpCpuidInfo snp_cpuid_info;
878 struct kvm_sev_snp_launch_update update = {0};
879
880 if (!data->hva || !data->len) {
        error_report("SNP_LAUNCH_UPDATE called with invalid address"
                     "/length: %p / %zx",
883 data->hva, data->len);
884 return 1;
885 }
886
887 if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
888 /* Save a copy for comparison in case the LAUNCH_UPDATE fails */
889 memcpy(&snp_cpuid_info, data->hva, sizeof(snp_cpuid_info));
890 }
891
892 update.uaddr = (__u64)(unsigned long)data->hva;
893 update.gfn_start = data->gpa >> TARGET_PAGE_BITS;
894 update.len = data->len;
895 update.type = data->type;
896
897 /*
898 * KVM_SEV_SNP_LAUNCH_UPDATE requires that GPA ranges have the private
899 * memory attribute set in advance.
900 */
901 ret = kvm_set_memory_attributes_private(data->gpa, data->len);
902 if (ret) {
        error_report("SEV-SNP: failed to configure initial "
                     "private guest memory");
905 goto out;
906 }
907
908 while (update.len || ret == -EAGAIN) {
909 trace_kvm_sev_snp_launch_update(update.uaddr, update.gfn_start <<
910 TARGET_PAGE_BITS, update.len,
911 snp_page_type_to_str(update.type));
912
913 ret = sev_ioctl(SEV_COMMON(sev_snp_guest)->sev_fd,
914 KVM_SEV_SNP_LAUNCH_UPDATE,
915 &update, &fw_error);
916 if (ret && ret != -EAGAIN) {
917 error_report("SNP_LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
918 ret, fw_error, fw_error_to_str(fw_error));
919
920 if (data->type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
921 sev_snp_cpuid_report_mismatches(&snp_cpuid_info, data->hva);
                error_report("SEV-SNP: failed to update CPUID page");
923 }
924 break;
925 }
926 }
927
928 out:
929 if (!ret && update.gfn_start << TARGET_PAGE_BITS != data->gpa + data->len) {
930 error_report("SEV-SNP: expected update of GPA range %"
                     HWADDR_PRIx "-%" HWADDR_PRIx ", "
                     "got GPA range %" HWADDR_PRIx "-%llx",
933 data->gpa, data->gpa + data->len, data->gpa,
934 update.gfn_start << TARGET_PAGE_BITS);
935 ret = -EIO;
936 }
937
938 return ret;
939 }
940
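/*
 * Adjust the CPUID values reported to an SEV-SNP guest: clear the TSC
 * deadline timer, TSC_ADJUST, several speculation-control bits and
 * VIRT_SSBD from the affected leaves, leaving all other values untouched.
 */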
941 static uint32_t
sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
                              int reg, uint32_t value)
944 {
945 switch (feature) {
946 case 1:
947 if (reg == R_ECX) {
948 return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
949 }
950 break;
951 case 7:
952 if (index == 0 && reg == R_EBX) {
953 return value & ~CPUID_7_0_EBX_TSC_ADJUST;
954 }
955 if (index == 0 && reg == R_EDX) {
956 return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
957 CPUID_7_0_EDX_STIBP |
958 CPUID_7_0_EDX_FLUSH_L1D |
959 CPUID_7_0_EDX_ARCH_CAPABILITIES |
960 CPUID_7_0_EDX_CORE_CAPABILITY |
961 CPUID_7_0_EDX_SPEC_CTRL_SSBD);
962 }
963 break;
964 case 0x80000008:
965 if (reg == R_EBX) {
966 return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
967 }
968 break;
969 }
970 return value;
971 }
972
973 static int
sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
                       uint8_t *addr, size_t len)
976 {
977 int ret, fw_error;
978 struct kvm_sev_launch_update_data update;
979
980 if (!addr || !len) {
981 return 1;
982 }
983
984 update.uaddr = (uintptr_t)addr;
985 update.len = len;
986 trace_kvm_sev_launch_update_data(addr, len);
987 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
988 &update, &fw_error);
989 if (ret) {
990 error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
991 __func__, ret, fw_error, fw_error_to_str(fw_error));
992 }
993
994 return ret;
995 }
996
997 static int
sev_launch_update_vmsa(SevGuestState *sev_guest)
999 {
1000 int ret, fw_error;
1001
1002 ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA,
1003 NULL, &fw_error);
1004 if (ret) {
1005 error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
1006 __func__, ret, fw_error, fw_error_to_str(fw_error));
1007 }
1008
1009 return ret;
1010 }
1011
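/*
 * Machine-init-done notifier for SEV/SEV-ES: measure any SEV-ES VM save
 * areas, then retrieve the launch measurement blob via
 * KVM_SEV_LAUNCH_MEASURE and stash it (base64-encoded) for
 * query-sev-launch-measure.
 */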
1012 static void
sev_launch_get_measure(Notifier *notifier, void *unused)
1014 {
1015 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1016 SevGuestState *sev_guest = SEV_GUEST(sev_common);
1017 int ret, error;
1018 g_autofree guchar *data = NULL;
1019 struct kvm_sev_launch_measure measurement = {};
1020
1021 if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
1022 return;
1023 }
1024
1025 if (sev_es_enabled()) {
1026 /* measure all the VM save areas before getting launch_measure */
1027 ret = sev_launch_update_vmsa(sev_guest);
1028 if (ret) {
1029 exit(1);
1030 }
1031 kvm_mark_guest_state_protected();
1032 }
1033
1034 /* query the measurement blob length */
1035 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1036 &measurement, &error);
1037 if (!measurement.len) {
1038 error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1039 __func__, ret, error, fw_error_to_str(errno));
1040 return;
1041 }
1042
1043 data = g_new0(guchar, measurement.len);
1044 measurement.uaddr = (unsigned long)data;
1045
1046 /* get the measurement blob */
1047 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_MEASURE,
1048 &measurement, &error);
1049 if (ret) {
1050 error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
1051 __func__, ret, error, fw_error_to_str(errno));
1052 return;
1053 }
1054
1055 sev_set_guest_state(sev_common, SEV_STATE_LAUNCH_SECRET);
1056
1057 /* encode the measurement value and emit the event */
1058 sev_guest->measurement = g_base64_encode(data, measurement.len);
1059 trace_kvm_sev_launch_measurement(sev_guest->measurement);
1060 }
1061
static char *sev_get_launch_measurement(void)
1063 {
1064 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
1065 SevGuestState *sev_guest =
1066 (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);
1067
1068 if (sev_guest &&
1069 SEV_COMMON(sev_guest)->state >= SEV_STATE_LAUNCH_SECRET) {
1070 return g_strdup(sev_guest->measurement);
1071 }
1072
1073 return NULL;
1074 }
1075
SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
1077 {
1078 char *data;
1079 SevLaunchMeasureInfo *info;
1080
1081 data = sev_get_launch_measurement();
1082 if (!data) {
1083 error_setg(errp, "SEV launch measurement is not available");
1084 return NULL;
1085 }
1086
1087 info = g_malloc0(sizeof(*info));
1088 info->data = data;
1089
1090 return info;
1091 }
1092
1093 static Notifier sev_machine_done_notify = {
1094 .notify = sev_launch_get_measure,
1095 };
1096
1097 static void
sev_launch_finish(SevCommonState *sev_common)
1099 {
1100 int ret, error;
1101
1102 trace_kvm_sev_launch_finish();
1103 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_FINISH, 0,
1104 &error);
1105 if (ret) {
1106 error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1107 __func__, ret, error, fw_error_to_str(error));
1108 exit(1);
1109 }
1110
1111 sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1112
1113 /* add migration blocker */
1114 error_setg(&sev_mig_blocker,
1115 "SEV: Migration is not implemented");
1116 migrate_add_blocker(&sev_mig_blocker, &error_fatal);
1117 }
1118
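/*
 * SNP defers the actual LAUNCH_UPDATE ioctls: this only queues the GPA/HVA
 * range on the launch_update list, which sev_snp_launch_finish() walks once
 * every metadata page has been registered.
 */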
1119 static int
snp_launch_update_data(uint64_t gpa, void *hva, size_t len, int type)
1121 {
1122 SevLaunchUpdateData *data;
1123
1124 data = g_new0(SevLaunchUpdateData, 1);
1125 data->gpa = gpa;
1126 data->hva = hva;
1127 data->len = len;
1128 data->type = type;
1129
1130 QTAILQ_INSERT_TAIL(&launch_update, data, next);
1131
1132 return 0;
1133 }
1134
1135 static int
sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
                           uint8_t *ptr, size_t len)
1138 {
1139 int ret = snp_launch_update_data(gpa, ptr, len,
1140 KVM_SEV_SNP_PAGE_TYPE_NORMAL);
1141 return ret;
1142 }
1143
1144 static int
sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info,
                        const KvmCpuidInfo *kvm_cpuid_info)
1147 {
1148 size_t i;
1149
1150 if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) {
1151 error_report("SEV-SNP: CPUID entry count (%d) exceeds max (%d)",
1152 kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT);
1153 return -1;
1154 }
1155
1156 memset(snp_cpuid_info, 0, sizeof(*snp_cpuid_info));
1157
1158 for (i = 0; i < kvm_cpuid_info->cpuid.nent; i++) {
1159 const struct kvm_cpuid_entry2 *kvm_cpuid_entry;
1160 SnpCpuidFunc *snp_cpuid_entry;
1161
1162 kvm_cpuid_entry = &kvm_cpuid_info->entries[i];
1163 snp_cpuid_entry = &snp_cpuid_info->entries[i];
1164
1165 snp_cpuid_entry->eax_in = kvm_cpuid_entry->function;
1166 if (kvm_cpuid_entry->flags == KVM_CPUID_FLAG_SIGNIFCANT_INDEX) {
1167 snp_cpuid_entry->ecx_in = kvm_cpuid_entry->index;
1168 }
1169 snp_cpuid_entry->eax = kvm_cpuid_entry->eax;
1170 snp_cpuid_entry->ebx = kvm_cpuid_entry->ebx;
1171 snp_cpuid_entry->ecx = kvm_cpuid_entry->ecx;
1172 snp_cpuid_entry->edx = kvm_cpuid_entry->edx;
1173
1174 /*
1175 * Guest kernels will calculate EBX themselves using the 0xD
1176 * subfunctions corresponding to the individual XSAVE areas, so only
1177 * encode the base XSAVE size in the initial leaves, corresponding
1178 * to the initial XCR0=1 state.
1179 */
1180 if (snp_cpuid_entry->eax_in == 0xD &&
1181 (snp_cpuid_entry->ecx_in == 0x0 || snp_cpuid_entry->ecx_in == 0x1)) {
1182 snp_cpuid_entry->ebx = 0x240;
1183 snp_cpuid_entry->xcr0_in = 1;
1184 snp_cpuid_entry->xss_in = 0;
1185 }
1186 }
1187
1188 snp_cpuid_info->count = i;
1189
1190 return 0;
1191 }
1192
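/*
 * Build the SNP CPUID page: fetch the first vCPU's CPUID table from KVM,
 * convert it into the SnpCpuidInfo layout, copy it into the guest page at
 * @hva and queue it as a CPUID-type launch update.
 */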
1193 static int
snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, size_t cpuid_len)
1195 {
1196 KvmCpuidInfo kvm_cpuid_info = {0};
1197 SnpCpuidInfo snp_cpuid_info;
1198 CPUState *cs = first_cpu;
1199 int ret;
1200 uint32_t i = 0;
1201
1202 assert(sizeof(snp_cpuid_info) <= cpuid_len);
1203
1204 /* get the cpuid list from KVM */
1205 do {
1206 kvm_cpuid_info.cpuid.nent = ++i;
1207 ret = kvm_vcpu_ioctl(cs, KVM_GET_CPUID2, &kvm_cpuid_info);
1208 } while (ret == -E2BIG);
1209
1210 if (ret) {
1211 error_report("SEV-SNP: unable to query CPUID values for CPU: '%s'",
1212 strerror(-ret));
1213 return 1;
1214 }
1215
1216 ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info);
1217 if (ret) {
1218 error_report("SEV-SNP: failed to generate CPUID table information");
1219 return 1;
1220 }
1221
1222 memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info));
1223
1224 return snp_launch_update_data(cpuid_addr, hva, cpuid_len,
1225 KVM_SEV_SNP_PAGE_TYPE_CPUID);
1226 }
1227
1228 static int
snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, uint32_t addr,
                                void *hva, uint32_t len)
1231 {
1232 int type = KVM_SEV_SNP_PAGE_TYPE_ZERO;
1233 if (sev_snp->parent_obj.kernel_hashes) {
1234 assert(sev_snp->kernel_hashes_data);
1235 assert((sev_snp->kernel_hashes_offset +
1236 sizeof(*sev_snp->kernel_hashes_data)) <= len);
1237 memset(hva, 0, len);
1238 memcpy(hva + sev_snp->kernel_hashes_offset, sev_snp->kernel_hashes_data,
1239 sizeof(*sev_snp->kernel_hashes_data));
1240 type = KVM_SEV_SNP_PAGE_TYPE_NORMAL;
1241 }
1242 return snp_launch_update_data(addr, hva, len, type);
1243 }
1244
1245 static int
snp_metadata_desc_to_page_type(int desc_type)
1247 {
1248 switch (desc_type) {
    /* Add the unmeasured prevalidated pages as zero pages */
1250 case SEV_DESC_TYPE_SNP_SEC_MEM: return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1251 case SEV_DESC_TYPE_SNP_SECRETS: return KVM_SEV_SNP_PAGE_TYPE_SECRETS;
1252 case SEV_DESC_TYPE_CPUID: return KVM_SEV_SNP_PAGE_TYPE_CPUID;
1253 default:
1254 return KVM_SEV_SNP_PAGE_TYPE_ZERO;
1255 }
1256 }
1257
1258 static void
snp_populate_metadata_pages(SevSnpGuestState *sev_snp,
                            OvmfSevMetadata *metadata)
1261 {
1262 OvmfSevMetadataDesc *desc;
1263 int type, ret, i;
1264 void *hva;
1265 MemoryRegion *mr = NULL;
1266
1267 for (i = 0; i < metadata->num_desc; i++) {
1268 desc = &metadata->descs[i];
1269
1270 type = snp_metadata_desc_to_page_type(desc->type);
1271
1272 hva = gpa2hva(&mr, desc->base, desc->len, NULL);
1273 if (!hva) {
1274 error_report("%s: Failed to get HVA for GPA 0x%x sz 0x%x",
1275 __func__, desc->base, desc->len);
1276 exit(1);
1277 }
1278
1279 if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) {
1280 ret = snp_launch_update_cpuid(desc->base, hva, desc->len);
1281 } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) {
1282 ret = snp_launch_update_kernel_hashes(sev_snp, desc->base, hva,
1283 desc->len);
1284 } else {
1285 ret = snp_launch_update_data(desc->base, hva, desc->len, type);
1286 }
1287
1288 if (ret) {
1289 error_report("%s: Failed to add metadata page gpa 0x%x+%x type %d",
1290 __func__, desc->base, desc->len, desc->type);
1291 exit(1);
1292 }
1293 }
1294 }
1295
1296 static void
sev_snp_launch_finish(SevCommonState *sev_common)
1298 {
1299 int ret, error;
1300 Error *local_err = NULL;
1301 OvmfSevMetadata *metadata;
1302 SevLaunchUpdateData *data;
1303 SevSnpGuestState *sev_snp = SEV_SNP_GUEST(sev_common);
1304 struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf;
1305
1306 /*
1307 * To boot the SNP guest, the hypervisor is required to populate the CPUID
1308 * and Secrets page before finalizing the launch flow. The location of
1309 * the secrets and CPUID page is available through the OVMF metadata GUID.
1310 */
1311 metadata = pc_system_get_ovmf_sev_metadata_ptr();
1312 if (metadata == NULL) {
1313 error_report("%s: Failed to locate SEV metadata header", __func__);
1314 exit(1);
1315 }
1316
1317 /* Populate all the metadata pages */
1318 snp_populate_metadata_pages(sev_snp, metadata);
1319
1320 QTAILQ_FOREACH(data, &launch_update, next) {
1321 ret = sev_snp_launch_update(sev_snp, data);
1322 if (ret) {
1323 exit(1);
1324 }
1325 }
1326
1327 trace_kvm_sev_snp_launch_finish(sev_snp->id_block_base64, sev_snp->id_auth_base64,
1328 sev_snp->host_data);
1329 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_SNP_LAUNCH_FINISH,
1330 finish, &error);
1331 if (ret) {
1332 error_report("SNP_LAUNCH_FINISH ret=%d fw_error=%d '%s'",
1333 ret, error, fw_error_to_str(error));
1334 exit(1);
1335 }
1336
1337 kvm_mark_guest_state_protected();
1338 sev_set_guest_state(sev_common, SEV_STATE_RUNNING);
1339
1340 /* add migration blocker */
1341 error_setg(&sev_mig_blocker,
1342 "SEV-SNP: Migration is not implemented");
1343 ret = migrate_add_blocker(&sev_mig_blocker, &local_err);
1344 if (local_err) {
1345 error_report_err(local_err);
1346 error_free(sev_mig_blocker);
1347 exit(1);
1348 }
1349 }
1350
1351
1352 static void
sev_vm_state_change(void *opaque, bool running, RunState state)
1354 {
1355 SevCommonState *sev_common = opaque;
1356 SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(opaque);
1357
1358 if (running) {
1359 if (!sev_check_state(sev_common, SEV_STATE_RUNNING)) {
1360 klass->launch_finish(sev_common);
1361 }
1362 }
1363 }
1364
1365 /*
 * This helper examines sev-guest properties to determine whether any options
1367 * have been set which rely on the newer KVM_SEV_INIT2 interface and associated
1368 * KVM VM types.
1369 */
static bool sev_init2_required(SevGuestState *sev_guest)
1371 {
1372 /* Currently no KVM_SEV_INIT2-specific options are exposed via QEMU */
1373 return false;
1374 }
1375
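/*
 * Select the KVM VM type for a (non-SNP) SEV guest: use the legacy
 * KVM_X86_DEFAULT_VM type when the legacy-vm-type property allows it,
 * otherwise KVM_X86_SEV_VM or KVM_X86_SEV_ES_VM depending on the guest
 * policy.  The result is cached in sev_common->kvm_type.
 */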
static int sev_kvm_type(X86ConfidentialGuest *cg)
1377 {
1378 SevCommonState *sev_common = SEV_COMMON(cg);
1379 SevGuestState *sev_guest = SEV_GUEST(sev_common);
1380 int kvm_type;
1381
1382 if (sev_common->kvm_type != -1) {
1383 goto out;
1384 }
1385
1386 /* These are the only cases where legacy VM types can be used. */
1387 if (sev_guest->legacy_vm_type == ON_OFF_AUTO_ON ||
1388 (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO &&
1389 !sev_init2_required(sev_guest))) {
1390 sev_common->kvm_type = KVM_X86_DEFAULT_VM;
1391 goto out;
1392 }
1393
1394 /*
     * Newer VM types are required, either explicitly via legacy-vm-type=off, or
1396 * implicitly via legacy-vm-type=auto along with additional sev-guest
1397 * properties that require the newer VM types.
1398 */
1399 kvm_type = (sev_guest->policy & SEV_POLICY_ES) ?
1400 KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
1401 if (!kvm_is_vm_type_supported(kvm_type)) {
1402 if (sev_guest->legacy_vm_type == ON_OFF_AUTO_AUTO) {
1403 error_report("SEV: host kernel does not support requested %s VM type, which is required "
1404 "for the set of options specified. To allow use of the legacy "
1405 "KVM_X86_DEFAULT_VM VM type, please disable any options that are not "
1406 "compatible with the legacy VM type, or upgrade your kernel.",
1407 kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1408 } else {
1409 error_report("SEV: host kernel does not support requested %s VM type. To allow use of "
1410 "the legacy KVM_X86_DEFAULT_VM VM type, the 'legacy-vm-type' argument "
1411 "must be set to 'on' or 'auto' for the sev-guest object.",
1412 kvm_type == KVM_X86_SEV_VM ? "KVM_X86_SEV_VM" : "KVM_X86_SEV_ES_VM");
1413 }
1414
1415 return -1;
1416 }
1417
1418 sev_common->kvm_type = kvm_type;
1419 out:
1420 return sev_common->kvm_type;
1421 }
1422
static int sev_snp_kvm_type(X86ConfidentialGuest *cg)
1424 {
1425 return KVM_X86_SNP_VM;
1426 }
1427
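/*
 * Common SEV/SEV-ES/SEV-SNP initialization: validate cbitpos and
 * reduced-phys-bits against host CPUID 0x8000001F, open the SEV device,
 * query the platform status, issue KVM_SEV_INIT/KVM_SEV_INIT2 for the
 * selected VM type, and finally invoke the per-flavour launch_start and
 * kvm_init hooks.
 */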
static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1429 {
1430 char *devname;
1431 int ret, fw_error, cmd;
1432 uint32_t ebx;
1433 uint32_t host_cbitpos;
1434 struct sev_user_data_status status = {};
1435 SevCommonState *sev_common = SEV_COMMON(cgs);
1436 SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(cgs);
1437 X86ConfidentialGuestClass *x86_klass =
1438 X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs);
1439
1440 sev_common->state = SEV_STATE_UNINIT;
1441
1442 host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
1443 host_cbitpos = ebx & 0x3f;
1444
1445 /*
1446 * The cbitpos value will be placed in bit positions 5:0 of the EBX
1447 * register of CPUID 0x8000001F. No need to verify the range as the
1448 * comparison against the host value accomplishes that.
1449 */
1450 if (host_cbitpos != sev_common->cbitpos) {
1451 error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
1452 __func__, host_cbitpos, sev_common->cbitpos);
1453 return -1;
1454 }
1455
1456 /*
1457 * The reduced-phys-bits value will be placed in bit positions 11:6 of
1458 * the EBX register of CPUID 0x8000001F, so verify the supplied value
1459 * is in the range of 1 to 63.
1460 */
1461 if (sev_common->reduced_phys_bits < 1 ||
1462 sev_common->reduced_phys_bits > 63) {
1463 error_setg(errp, "%s: reduced_phys_bits check failed,"
1464 " it should be in the range of 1 to 63, requested '%d'",
1465 __func__, sev_common->reduced_phys_bits);
1466 return -1;
1467 }
1468
1469 devname = object_property_get_str(OBJECT(sev_common), "sev-device", NULL);
1470 sev_common->sev_fd = open(devname, O_RDWR);
1471 if (sev_common->sev_fd < 0) {
1472 error_setg(errp, "%s: Failed to open %s '%s'", __func__,
1473 devname, strerror(errno));
1474 g_free(devname);
1475 return -1;
1476 }
1477 g_free(devname);
1478
1479 ret = sev_platform_ioctl(sev_common->sev_fd, SEV_PLATFORM_STATUS, &status,
1480 &fw_error);
1481 if (ret) {
1482 error_setg(errp, "%s: failed to get platform status ret=%d "
1483 "fw_error='%d: %s'", __func__, ret, fw_error,
1484 fw_error_to_str(fw_error));
1485 return -1;
1486 }
1487 sev_common->build_id = status.build;
1488 sev_common->api_major = status.api_major;
1489 sev_common->api_minor = status.api_minor;
1490
1491 if (sev_es_enabled()) {
1492 if (!kvm_kernel_irqchip_allowed()) {
            error_setg(errp, "%s: SEV-ES guests require in-kernel irqchip "
                       "support", __func__);
1495 return -1;
1496 }
1497 }
1498
1499 if (sev_es_enabled() && !sev_snp_enabled()) {
1500 if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
1501 error_setg(errp, "%s: guest policy requires SEV-ES, but "
1502 "host SEV-ES support unavailable",
1503 __func__);
1504 return -1;
1505 }
1506 }
1507
1508 trace_kvm_sev_init();
1509 switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) {
1510 case KVM_X86_DEFAULT_VM:
1511 cmd = sev_es_enabled() ? KVM_SEV_ES_INIT : KVM_SEV_INIT;
1512
1513 ret = sev_ioctl(sev_common->sev_fd, cmd, NULL, &fw_error);
1514 break;
1515 case KVM_X86_SEV_VM:
1516 case KVM_X86_SEV_ES_VM:
1517 case KVM_X86_SNP_VM: {
1518 struct kvm_sev_init args = { 0 };
1519
1520 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error);
1521 break;
1522 }
1523 default:
1524 error_setg(errp, "%s: host kernel does not support the requested SEV configuration.",
1525 __func__);
1526 return -1;
1527 }
1528
1529 if (ret) {
1530 error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
1531 __func__, ret, fw_error, fw_error_to_str(fw_error));
1532 return -1;
1533 }
1534
1535 ret = klass->launch_start(sev_common);
1536
1537 if (ret) {
1538 error_setg(errp, "%s: failed to create encryption context", __func__);
1539 return -1;
1540 }
1541
1542 if (klass->kvm_init && klass->kvm_init(cgs, errp)) {
1543 return -1;
1544 }
1545
1546 qemu_add_vm_change_state_handler(sev_vm_state_change, sev_common);
1547
1548 cgs->ready = true;
1549
1550 return 0;
1551 }
1552
static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1554 {
1555 int ret;
1556
1557 /*
     * SEV/SEV-ES rely on pinned memory to back guest RAM, so discarding
     * isn't actually possible. With SNP, only guest_memfd pages are used
     * for private guest memory, so discarding of shared memory is still
     * possible.
1562 */
1563 ret = ram_block_discard_disable(true);
1564 if (ret) {
1565 error_setg(errp, "%s: cannot disable RAM discard", __func__);
1566 return -1;
1567 }
1568
1569 /*
1570 * SEV uses these notifiers to register/pin pages prior to guest use,
1571 * but SNP relies on guest_memfd for private pages, which has its
1572 * own internal mechanisms for registering/pinning private memory.
1573 */
1574 ram_block_notifier_add(&sev_ram_notifier);
1575
1576 /*
1577 * The machine done notify event is used for SEV guests to get the
1578 * measurement of the encrypted images. When SEV-SNP is enabled, the
1579 * measurement is part of the guest attestation process where it can
1580 * be collected without any reliance on the VMM. So skip registering
1581 * the notifier for SNP in favor of using guest attestation instead.
1582 */
1583 qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
1584
1585 return 0;
1586 }
1587
static int sev_snp_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
1589 {
1590 MachineState *ms = MACHINE(qdev_get_machine());
1591 X86MachineState *x86ms = X86_MACHINE(ms);
1592
1593 if (x86ms->smm == ON_OFF_AUTO_AUTO) {
1594 x86ms->smm = ON_OFF_AUTO_OFF;
1595 } else if (x86ms->smm == ON_OFF_AUTO_ON) {
1596 error_setg(errp, "SEV-SNP does not support SMM.");
1597 return -1;
1598 }
1599
1600 return 0;
1601 }
1602
1603 int
sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
1605 {
1606 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1607 SevCommonStateClass *klass;
1608
1609 if (!sev_common) {
1610 return 0;
1611 }
1612 klass = SEV_COMMON_GET_CLASS(sev_common);
1613
1614 /* if SEV is in update state then encrypt the data else do nothing */
1615 if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
1616 int ret;
1617
1618 ret = klass->launch_update_data(sev_common, gpa, ptr, len);
1619 if (ret < 0) {
1620 error_setg(errp, "SEV: Failed to encrypt pflash rom");
1621 return ret;
1622 }
1623 }
1624
1625 return 0;
1626 }
1627
int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
                             uint64_t gpa, Error **errp)
1630 {
1631 ERRP_GUARD();
1632 struct kvm_sev_launch_secret input;
1633 g_autofree guchar *data = NULL, *hdr = NULL;
1634 int error, ret = 1;
1635 void *hva;
1636 gsize hdr_sz = 0, data_sz = 0;
1637 MemoryRegion *mr = NULL;
1638 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1639
1640 if (!sev_common) {
1641 error_setg(errp, "SEV not enabled for guest");
1642 return 1;
1643 }
1644
1645 /* secret can be injected only in this state */
1646 if (!sev_check_state(sev_common, SEV_STATE_LAUNCH_SECRET)) {
1647 error_setg(errp, "SEV: Not in correct state. (LSECRET) %x",
1648 sev_common->state);
1649 return 1;
1650 }
1651
1652 hdr = g_base64_decode(packet_hdr, &hdr_sz);
1653 if (!hdr || !hdr_sz) {
1654 error_setg(errp, "SEV: Failed to decode sequence header");
1655 return 1;
1656 }
1657
1658 data = g_base64_decode(secret, &data_sz);
1659 if (!data || !data_sz) {
1660 error_setg(errp, "SEV: Failed to decode data");
1661 return 1;
1662 }
1663
1664 hva = gpa2hva(&mr, gpa, data_sz, errp);
1665 if (!hva) {
1666 error_prepend(errp, "SEV: Failed to calculate guest address: ");
1667 return 1;
1668 }
1669
1670 input.hdr_uaddr = (uint64_t)(unsigned long)hdr;
1671 input.hdr_len = hdr_sz;
1672
1673 input.trans_uaddr = (uint64_t)(unsigned long)data;
1674 input.trans_len = data_sz;
1675
1676 input.guest_uaddr = (uint64_t)(unsigned long)hva;
1677 input.guest_len = data_sz;
1678
1679 trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
1680 input.trans_uaddr, input.trans_len);
1681
1682 ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_SECRET,
1683 &input, &error);
1684 if (ret) {
1685 error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'",
1686 ret, error, fw_error_to_str(error));
1687 return ret;
1688 }
1689
1690 return 0;
1691 }
1692
1693 #define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
1694 struct sev_secret_area {
1695 uint32_t base;
1696 uint32_t size;
1697 };
1698
void qmp_sev_inject_launch_secret(const char *packet_hdr,
                                  const char *secret,
                                  bool has_gpa, uint64_t gpa,
                                  Error **errp)
1703 {
1704 if (!sev_enabled()) {
1705 error_setg(errp, "SEV not enabled for guest");
1706 return;
1707 }
1708 if (!has_gpa) {
1709 uint8_t *data;
1710 struct sev_secret_area *area;
1711
1712 if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
1713 error_setg(errp, "SEV: no secret area found in OVMF,"
1714 " gpa must be specified.");
1715 return;
1716 }
1717 area = (struct sev_secret_area *)data;
1718 gpa = area->base;
1719 }
1720
1721 sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
1722 }
1723
1724 static int
sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr)
1726 {
1727 if (!info->reset_addr) {
1728 error_report("SEV-ES reset address is zero");
1729 return 1;
1730 }
1731
1732 *addr = info->reset_addr;
1733
1734 return 0;
1735 }
1736
1737 static int
sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size,
                         uint32_t *addr)
1740 {
1741 QemuUUID info_guid, *guid;
1742 SevInfoBlock *info;
1743 uint8_t *data;
1744 uint16_t *len;
1745
1746 /*
1747 * Initialize the address to zero. An address of zero with a successful
1748 * return code indicates that SEV-ES is not active.
1749 */
1750 *addr = 0;
1751
1752 /*
1753 * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID.
1754 * The SEV GUID is located on its own (original implementation) or within
1755 * the Firmware GUID Table (new implementation), either of which are
1756 * located 32 bytes from the end of the flash.
1757 *
1758 * Check the Firmware GUID Table first.
1759 */
1760 if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) {
1761 return sev_es_parse_reset_block((SevInfoBlock *)data, addr);
1762 }
1763
1764 /*
1765 * SEV info block not found in the Firmware GUID Table (or there isn't
1766 * a Firmware GUID Table), fall back to the original implementation.
1767 */
1768 data = flash_ptr + flash_size - 0x20;
1769
1770 qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid);
1771 info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */
1772
1773 guid = (QemuUUID *)(data - sizeof(info_guid));
1774 if (!qemu_uuid_is_equal(guid, &info_guid)) {
1775 error_report("SEV information block/Firmware GUID Table block not found in pflash rom");
1776 return 1;
1777 }
1778
1779 len = (uint16_t *)((uint8_t *)guid - sizeof(*len));
1780 info = (SevInfoBlock *)(data - le16_to_cpu(*len));
1781
1782 return sev_es_parse_reset_block(info, addr);
1783 }
1784
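/*
 * Program the AP reset vector recovered from the firmware image into a
 * vCPU's CS base and RIP.  The BSP keeps the architectural reset state and
 * is skipped.
 */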
void sev_es_set_reset_vector(CPUState *cpu)
1786 {
1787 X86CPU *x86;
1788 CPUX86State *env;
1789 ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
1790 SevCommonState *sev_common = SEV_COMMON(
1791 object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON));
1792
1793 /* Only update if we have valid reset information */
1794 if (!sev_common || !sev_common->reset_data_valid) {
1795 return;
1796 }
1797
1798 /* Do not update the BSP reset state */
1799 if (cpu->cpu_index == 0) {
1800 return;
1801 }
1802
1803 x86 = X86_CPU(cpu);
1804 env = &x86->env;
1805
1806 cpu_x86_load_seg_cache(env, R_CS, 0xf000, sev_common->reset_cs, 0xffff,
1807 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1808 DESC_R_MASK | DESC_A_MASK);
1809
1810 env->eip = sev_common->reset_ip;
1811 }
1812
int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
1814 {
1815 CPUState *cpu;
1816 uint32_t addr;
1817 int ret;
1818 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1819
1820 if (!sev_es_enabled()) {
1821 return 0;
1822 }
1823
1824 addr = 0;
1825 ret = sev_es_find_reset_vector(flash_ptr, flash_size,
1826 &addr);
1827 if (ret) {
1828 return ret;
1829 }
1830
1831 if (addr) {
1832 sev_common->reset_cs = addr & 0xffff0000;
1833 sev_common->reset_ip = addr & 0x0000ffff;
1834 sev_common->reset_data_valid = true;
1835
1836 CPU_FOREACH(cpu) {
1837 sev_es_set_reset_vector(cpu);
1838 }
1839 }
1840
1841 return 0;
1842 }
1843
1844 static const QemuUUID sev_hash_table_header_guid = {
1845 .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93,
1846 0xd4, 0x11, 0xfd, 0x21)
1847 };
1848
1849 static const QemuUUID sev_kernel_entry_guid = {
1850 .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1,
1851 0x72, 0xd2, 0x04, 0x5b)
1852 };
1853 static const QemuUUID sev_initrd_entry_guid = {
1854 .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2,
1855 0x91, 0x69, 0x78, 0x1d)
1856 };
1857 static const QemuUUID sev_cmdline_entry_guid = {
1858 .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71,
1859 0x4d, 0x36, 0xab, 0x2a)
1860 };
1861
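/*
 * Fill @padded_ht with SHA-256 digests of the kernel command line
 * (including its trailing NUL), the initrd and the setup+kernel image,
 * each tagged with one of the GUIDs above.  The trailing padding is
 * zeroed so that the page containing the table measures deterministically.
 */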
1862 static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht,
1863 SevKernelLoaderContext *ctx,
1864 Error **errp)
1865 {
1866 SevHashTable *ht;
1867 uint8_t cmdline_hash[HASH_SIZE];
1868 uint8_t initrd_hash[HASH_SIZE];
1869 uint8_t kernel_hash[HASH_SIZE];
1870 uint8_t *hashp;
1871 size_t hash_len = HASH_SIZE;
1872
1873 /*
1874 * Calculate hash of kernel command-line with the terminating null byte. If
1875 * the user doesn't supply a command-line via -append, the 1-byte "\0" will
1876 * be used.
1877 */
1878 hashp = cmdline_hash;
1879 if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->cmdline_data,
1880 ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
1881 return false;
1882 }
1883 assert(hash_len == HASH_SIZE);
1884
1885 /*
1886 * Calculate hash of initrd. If the user doesn't supply an initrd via
1887 * -initrd, an empty buffer will be used (ctx->initrd_size == 0).
1888 */
1889 hashp = initrd_hash;
1890 if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->initrd_data,
1891 ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
1892 return false;
1893 }
1894 assert(hash_len == HASH_SIZE);
1895
1896 /* Calculate hash of the kernel */
1897 hashp = kernel_hash;
1898 struct iovec iov[2] = {
1899 { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
1900 { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
1901 };
1902 if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, iov, ARRAY_SIZE(iov),
1903 &hashp, &hash_len, errp) < 0) {
1904 return false;
1905 }
1906 assert(hash_len == HASH_SIZE);
1907
1908 ht = &padded_ht->ht;
1909
1910 ht->guid = sev_hash_table_header_guid;
1911 ht->len = sizeof(*ht);
1912
1913 ht->cmdline.guid = sev_cmdline_entry_guid;
1914 ht->cmdline.len = sizeof(ht->cmdline);
1915 memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash));
1916
1917 ht->initrd.guid = sev_initrd_entry_guid;
1918 ht->initrd.len = sizeof(ht->initrd);
1919 memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash));
1920
1921 ht->kernel.guid = sev_kernel_entry_guid;
1922 ht->kernel.len = sizeof(ht->kernel);
1923 memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));
1924
1925 /* zero the excess data so the measurement can be reliably calculated */
1926 memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
1927
1928 return true;
1929 }
1930
1931 static bool sev_snp_build_kernel_loader_hashes(SevCommonState *sev_common,
1932 SevHashTableDescriptor *area,
1933 SevKernelLoaderContext *ctx,
1934 Error **errp)
1935 {
1936 /*
1937 * SNP: Populate the hashes table in an area that later in
1938 * snp_launch_update_kernel_hashes() will be copied to the guest memory
1939 * and encrypted.
1940 */
1941 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(sev_common);
1942 sev_snp_guest->kernel_hashes_offset = area->base & ~TARGET_PAGE_MASK;
1943 sev_snp_guest->kernel_hashes_data = g_new0(PaddedSevHashTable, 1);
1944 return build_kernel_loader_hashes(sev_snp_guest->kernel_hashes_data, ctx, errp);
1945 }
1946
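/*
 * SEV/SEV-ES counterpart of the SNP variant above: the table is written
 * directly into the OVMF-designated guest memory area and then encrypted
 * in place with sev_encrypt_flash(), rather than being stashed for a
 * later launch update.
 */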
1947 static bool sev_build_kernel_loader_hashes(SevCommonState *sev_common,
1948 SevHashTableDescriptor *area,
1949 SevKernelLoaderContext *ctx,
1950 Error **errp)
1951 {
1952 PaddedSevHashTable *padded_ht;
1953 hwaddr mapped_len = sizeof(*padded_ht);
1954 MemTxAttrs attrs = { 0 };
1955 bool ret = true;
1956
1957 /*
1958 * Populate the hashes table in the guest's memory at the OVMF-designated
1959 * area for the SEV hashes table
1960 */
1961 padded_ht = address_space_map(&address_space_memory, area->base,
1962 &mapped_len, true, attrs);
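/*
 * address_space_map() may return a shorter mapping than requested (or
 * fail outright); anything other than a full direct mapping of the
 * table is treated as an error rather than written piecemeal.
 */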
1963 if (!padded_ht || mapped_len != sizeof(*padded_ht)) {
1964 error_setg(errp, "SEV: cannot map hashes table guest memory area");
1965 return false;
1966 }
1967
1968 if (build_kernel_loader_hashes(padded_ht, ctx, errp)) {
1969 if (sev_encrypt_flash(area->base, (uint8_t *)padded_ht,
1970 sizeof(*padded_ht), errp) < 0) {
1971 ret = false;
1972 }
1973 } else {
1974 ret = false;
1975 }
1976
1977 address_space_unmap(&address_space_memory, padded_ht,
1978 mapped_len, true, mapped_len);
1979
1980 return ret;
1981 }
1982
1983 /*
1984 * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page
1985 * which is included in SEV's initial memory measurement.
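 *
 * This only happens when the sev-guest/sev-snp-guest object was created
 * with kernel-hashes=on and the guest firmware publishes the
 * SEV_HASH_TABLE_RV_GUID descriptor; with kernel-hashes off the function
 * returns false without setting an error.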
1986 */
1987 bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
1988 {
1989 uint8_t *data;
1990 SevHashTableDescriptor *area;
1991 SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
1992 SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
1993
1994 /*
1995 * Only add the kernel hashes if the sev-guest configuration explicitly
1996 * stated kernel-hashes=on.
1997 */
1998 if (!sev_common->kernel_hashes) {
1999 return false;
2000 }
2001
2002 if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
2003 error_setg(errp, "SEV: kernel specified but guest firmware "
2004 "has no hashes table GUID");
2005 return false;
2006 }
2007
2008 area = (SevHashTableDescriptor *)data;
2009 if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
2010 error_setg(errp, "SEV: guest firmware hashes table area is invalid "
2011 "(base=0x%x size=0x%x)", area->base, area->size);
2012 return false;
2013 }
2014
2015 return klass->build_kernel_loader_hashes(sev_common, area, ctx, errp);
2016 }
2017
2018 static char *
2019 sev_common_get_sev_device(Object *obj, Error **errp)
2020 {
2021 return g_strdup(SEV_COMMON(obj)->sev_device);
2022 }
2023
2024 static void
2025 sev_common_set_sev_device(Object *obj, const char *value, Error **errp)
2026 {
2027 SEV_COMMON(obj)->sev_device = g_strdup(value);
2028 }
2029
2030 static bool sev_common_get_kernel_hashes(Object *obj, Error **errp)
2031 {
2032 return SEV_COMMON(obj)->kernel_hashes;
2033 }
2034
2035 static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp)
2036 {
2037 SEV_COMMON(obj)->kernel_hashes = value;
2038 }
2039
2040 static void
2041 sev_common_class_init(ObjectClass *oc, const void *data)
2042 {
2043 ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
2044
2045 klass->kvm_init = sev_common_kvm_init;
2046
2047 object_class_property_add_str(oc, "sev-device",
2048 sev_common_get_sev_device,
2049 sev_common_set_sev_device);
2050 object_class_property_set_description(oc, "sev-device",
2051 "SEV device to use");
2052 object_class_property_add_bool(oc, "kernel-hashes",
2053 sev_common_get_kernel_hashes,
2054 sev_common_set_kernel_hashes);
2055 object_class_property_set_description(oc, "kernel-hashes",
2056 "add kernel hashes to guest firmware for measured Linux boot");
2057 }
2058
2059 static void
2060 sev_common_instance_init(Object *obj)
2061 {
2062 SevCommonState *sev_common = SEV_COMMON(obj);
2063
2064 sev_common->kvm_type = -1;
2065
2066 sev_common->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
2067
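/*
 * Both values below are host-specific: cbitpos is the C-bit position
 * reported by CPUID 0x8000001F and reduced-phys-bits is the number of
 * physical address bits lost to memory encryption.  The QEMU
 * documentation typically shows something like cbitpos=47 (or 51 on
 * newer parts) with reduced-phys-bits=1; the exact values must match
 * the host CPU.
 */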
2068 object_property_add_uint32_ptr(obj, "cbitpos", &sev_common->cbitpos,
2069 OBJ_PROP_FLAG_READWRITE);
2070 object_property_add_uint32_ptr(obj, "reduced-phys-bits",
2071 &sev_common->reduced_phys_bits,
2072 OBJ_PROP_FLAG_READWRITE);
2073 }
2074
2075 /* sev guest info common to sev/sev-es/sev-snp */
2076 static const TypeInfo sev_common_info = {
2077 .parent = TYPE_X86_CONFIDENTIAL_GUEST,
2078 .name = TYPE_SEV_COMMON,
2079 .instance_size = sizeof(SevCommonState),
2080 .instance_init = sev_common_instance_init,
2081 .class_size = sizeof(SevCommonStateClass),
2082 .class_init = sev_common_class_init,
2083 .abstract = true,
2084 .interfaces = (const InterfaceInfo[]) {
2085 { TYPE_USER_CREATABLE },
2086 { }
2087 }
2088 };
2089
2090 static char *
2091 sev_guest_get_dh_cert_file(Object *obj, Error **errp)
2092 {
2093 return g_strdup(SEV_GUEST(obj)->dh_cert_file);
2094 }
2095
2096 static void
2097 sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
2098 {
2099 SEV_GUEST(obj)->dh_cert_file = g_strdup(value);
2100 }
2101
2102 static char *
2103 sev_guest_get_session_file(Object *obj, Error **errp)
2104 {
2105 SevGuestState *sev_guest = SEV_GUEST(obj);
2106
2107 return sev_guest->session_file ? g_strdup(sev_guest->session_file) : NULL;
2108 }
2109
2110 static void
2111 sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
2112 {
2113 SEV_GUEST(obj)->session_file = g_strdup(value);
2114 }
2115
2116 static void sev_guest_get_legacy_vm_type(Object *obj, Visitor *v,
2117 const char *name, void *opaque,
2118 Error **errp)
2119 {
2120 SevGuestState *sev_guest = SEV_GUEST(obj);
2121 OnOffAuto legacy_vm_type = sev_guest->legacy_vm_type;
2122
2123 visit_type_OnOffAuto(v, name, &legacy_vm_type, errp);
2124 }
2125
2126 static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v,
2127 const char *name, void *opaque,
2128 Error **errp)
2129 {
2130 SevGuestState *sev_guest = SEV_GUEST(obj);
2131
2132 visit_type_OnOffAuto(v, name, &sev_guest->legacy_vm_type, errp);
2133 }
2134
2135 static void
2136 sev_guest_class_init(ObjectClass *oc, const void *data)
2137 {
2138 SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
2139 X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
2140
2141 klass->build_kernel_loader_hashes = sev_build_kernel_loader_hashes;
2142 klass->launch_start = sev_launch_start;
2143 klass->launch_finish = sev_launch_finish;
2144 klass->launch_update_data = sev_launch_update_data;
2145 klass->kvm_init = sev_kvm_init;
2146 x86_klass->kvm_type = sev_kvm_type;
2147
2148 object_class_property_add_str(oc, "dh-cert-file",
2149 sev_guest_get_dh_cert_file,
2150 sev_guest_set_dh_cert_file);
2151 object_class_property_set_description(oc, "dh-cert-file",
2152 "guest owners DH certificate (encoded with base64)");
2153 object_class_property_add_str(oc, "session-file",
2154 sev_guest_get_session_file,
2155 sev_guest_set_session_file);
2156 object_class_property_set_description(oc, "session-file",
2157 "guest owners session parameters (encoded with base64)");
2158 object_class_property_add(oc, "legacy-vm-type", "OnOffAuto",
2159 sev_guest_get_legacy_vm_type,
2160 sev_guest_set_legacy_vm_type, NULL, NULL);
2161 object_class_property_set_description(oc, "legacy-vm-type",
2162 "use legacy VM type to maintain measurement compatibility with older QEMU or kernel versions.");
2163 }
2164
2165 static void
2166 sev_guest_instance_init(Object *obj)
2167 {
2168 SevGuestState *sev_guest = SEV_GUEST(obj);
2169
2170 sev_guest->policy = DEFAULT_GUEST_POLICY;
2171 object_property_add_uint32_ptr(obj, "handle", &sev_guest->handle,
2172 OBJ_PROP_FLAG_READWRITE);
2173 object_property_add_uint32_ptr(obj, "policy", &sev_guest->policy,
2174 OBJ_PROP_FLAG_READWRITE);
2175 object_apply_compat_props(obj);
2176
2177 sev_guest->legacy_vm_type = ON_OFF_AUTO_AUTO;
2178 }
2179
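/*
 * Illustrative command line for a plain SEV or SEV-ES guest (values and
 * file names are examples only, not requirements):
 *
 *   qemu-system-x86_64 \
 *     -machine q35,confidential-guest-support=sev0 \
 *     -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1,kernel-hashes=on \
 *     -kernel vmlinuz -initrd initrd.img -append "console=ttyS0"
 */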
2180 /* guest info specific sev/sev-es */
2181 static const TypeInfo sev_guest_info = {
2182 .parent = TYPE_SEV_COMMON,
2183 .name = TYPE_SEV_GUEST,
2184 .instance_size = sizeof(SevGuestState),
2185 .instance_init = sev_guest_instance_init,
2186 .class_init = sev_guest_class_init,
2187 };
2188
2189 static void
2190 sev_snp_guest_get_policy(Object *obj, Visitor *v, const char *name,
2191 void *opaque, Error **errp)
2192 {
2193 visit_type_uint64(v, name,
2194 (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2195 errp);
2196 }
2197
2198 static void
2199 sev_snp_guest_set_policy(Object *obj, Visitor *v, const char *name,
2200 void *opaque, Error **errp)
2201 {
2202 visit_type_uint64(v, name,
2203 (uint64_t *)&SEV_SNP_GUEST(obj)->kvm_start_conf.policy,
2204 errp);
2205 }
2206
2207 static char *
2208 sev_snp_guest_get_guest_visible_workarounds(Object *obj, Error **errp)
2209 {
2210 return g_strdup(SEV_SNP_GUEST(obj)->guest_visible_workarounds);
2211 }
2212
2213 static void
2214 sev_snp_guest_set_guest_visible_workarounds(Object *obj, const char *value,
2215 Error **errp)
2216 {
2217 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2218 struct kvm_sev_snp_launch_start *start = &sev_snp_guest->kvm_start_conf;
2219 g_autofree guchar *blob;
2220 gsize len;
2221
2222 g_free(sev_snp_guest->guest_visible_workarounds);
2223
2224 /* store the base64 str so we don't need to re-encode in getter */
2225 sev_snp_guest->guest_visible_workarounds = g_strdup(value);
2226
2227 blob = qbase64_decode(sev_snp_guest->guest_visible_workarounds,
2228 -1, &len, errp);
2229 if (!blob) {
2230 return;
2231 }
2232
2233 if (len != sizeof(start->gosvw)) {
2234 error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2235 " exceeds max of %zu",
2236 len, sizeof(start->gosvw));
2237 return;
2238 }
2239
2240 memcpy(start->gosvw, blob, len);
2241 }
2242
2243 static char *
2244 sev_snp_guest_get_id_block(Object *obj, Error **errp)
2245 {
2246 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2247
2248 return g_strdup(sev_snp_guest->id_block_base64);
2249 }
2250
2251 static void
2252 sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
2253 {
2254 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2255 struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2256 gsize len;
2257
2258 finish->id_block_en = 0;
2259 g_free(sev_snp_guest->id_block);
2260 g_free(sev_snp_guest->id_block_base64);
2261
2262 /* store the base64 str so we don't need to re-encode in getter */
2263 sev_snp_guest->id_block_base64 = g_strdup(value);
2264 sev_snp_guest->id_block =
2265 qbase64_decode(sev_snp_guest->id_block_base64, -1, &len, errp);
2266
2267 if (!sev_snp_guest->id_block) {
2268 return;
2269 }
2270
2271 if (len != KVM_SEV_SNP_ID_BLOCK_SIZE) {
2272 error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2273 " not equal to %u",
2274 len, KVM_SEV_SNP_ID_BLOCK_SIZE);
2275 return;
2276 }
2277
2278 finish->id_block_en = 1;
2279 finish->id_block_uaddr = (uintptr_t)sev_snp_guest->id_block;
2280 }
2281
2282 static char *
2283 sev_snp_guest_get_id_auth(Object *obj, Error **errp)
2284 {
2285 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2286
2287 return g_strdup(sev_snp_guest->id_auth_base64);
2288 }
2289
2290 static void
2291 sev_snp_guest_set_id_auth(Object *obj, const char *value, Error **errp)
2292 {
2293 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2294 struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2295 gsize len;
2296
2297 finish->id_auth_uaddr = 0;
2298 g_free(sev_snp_guest->id_auth);
2299 g_free(sev_snp_guest->id_auth_base64);
2300
2301 /* store the base64 str so we don't need to re-encode in getter */
2302 sev_snp_guest->id_auth_base64 = g_strdup(value);
2303 sev_snp_guest->id_auth =
2304 qbase64_decode(sev_snp_guest->id_auth_base64, -1, &len, errp);
2305
2306 if (!sev_snp_guest->id_auth) {
2307 return;
2308 }
2309
2310 if (len > KVM_SEV_SNP_ID_AUTH_SIZE) {
2311 error_setg(errp, "id-auth parameter length of %" G_GSIZE_FORMAT
2312 " exceeds max of %u",
2313 len, KVM_SEV_SNP_ID_AUTH_SIZE);
2314 return;
2315 }
2316
2317 finish->id_auth_uaddr = (uintptr_t)sev_snp_guest->id_auth;
2318 }
2319
2320 static bool
2321 sev_snp_guest_get_author_key_enabled(Object *obj, Error **errp)
2322 {
2323 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2324
2325 return !!sev_snp_guest->kvm_finish_conf.auth_key_en;
2326 }
2327
2328 static void
2329 sev_snp_guest_set_author_key_enabled(Object *obj, bool value, Error **errp)
2330 {
2331 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2332
2333 sev_snp_guest->kvm_finish_conf.auth_key_en = value;
2334 }
2335
2336 static bool
2337 sev_snp_guest_get_vcek_disabled(Object *obj, Error **errp)
2338 {
2339 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2340
2341 return !!sev_snp_guest->kvm_finish_conf.vcek_disabled;
2342 }
2343
2344 static void
2345 sev_snp_guest_set_vcek_disabled(Object *obj, bool value, Error **errp)
2346 {
2347 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2348
2349 sev_snp_guest->kvm_finish_conf.vcek_disabled = value;
2350 }
2351
2352 static char *
2353 sev_snp_guest_get_host_data(Object *obj, Error **errp)
2354 {
2355 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2356
2357 return g_strdup(sev_snp_guest->host_data);
2358 }
2359
2360 static void
2361 sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp)
2362 {
2363 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2364 struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
2365 g_autofree guchar *blob;
2366 gsize len;
2367
2368 g_free(sev_snp_guest->host_data);
2369
2370 /* store the base64 str so we don't need to re-encode in getter */
2371 sev_snp_guest->host_data = g_strdup(value);
2372
2373 blob = qbase64_decode(sev_snp_guest->host_data, -1, &len, errp);
2374
2375 if (!blob) {
2376 return;
2377 }
2378
2379 if (len != sizeof(finish->host_data)) {
2380 error_setg(errp, "parameter length of %" G_GSIZE_FORMAT
2381 " not equal to %zu",
2382 len, sizeof(finish->host_data));
2383 return;
2384 }
2385
2386 memcpy(finish->host_data, blob, len);
2387 }
2388
2389 static void
2390 sev_snp_guest_class_init(ObjectClass *oc, const void *data)
2391 {
2392 SevCommonStateClass *klass = SEV_COMMON_CLASS(oc);
2393 X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
2394
2395 klass->build_kernel_loader_hashes = sev_snp_build_kernel_loader_hashes;
2396 klass->launch_start = sev_snp_launch_start;
2397 klass->launch_finish = sev_snp_launch_finish;
2398 klass->launch_update_data = sev_snp_launch_update_data;
2399 klass->kvm_init = sev_snp_kvm_init;
2400 x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
2401 x86_klass->kvm_type = sev_snp_kvm_type;
2402
2403 object_class_property_add(oc, "policy", "uint64",
2404 sev_snp_guest_get_policy,
2405 sev_snp_guest_set_policy, NULL, NULL);
2406 object_class_property_add_str(oc, "guest-visible-workarounds",
2407 sev_snp_guest_get_guest_visible_workarounds,
2408 sev_snp_guest_set_guest_visible_workarounds);
2409 object_class_property_add_str(oc, "id-block",
2410 sev_snp_guest_get_id_block,
2411 sev_snp_guest_set_id_block);
2412 object_class_property_add_str(oc, "id-auth",
2413 sev_snp_guest_get_id_auth,
2414 sev_snp_guest_set_id_auth);
2415 object_class_property_add_bool(oc, "author-key-enabled",
2416 sev_snp_guest_get_author_key_enabled,
2417 sev_snp_guest_set_author_key_enabled);
2418 object_class_property_add_bool(oc, "vcek-disabled",
2419 sev_snp_guest_get_vcek_disabled,
2420 sev_snp_guest_set_vcek_disabled);
2421 object_class_property_add_str(oc, "host-data",
2422 sev_snp_guest_get_host_data,
2423 sev_snp_guest_set_host_data);
2424 }
2425
2426 static void
2427 sev_snp_guest_instance_init(Object *obj)
2428 {
2429 ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
2430 SevSnpGuestState *sev_snp_guest = SEV_SNP_GUEST(obj);
2431
2432 cgs->require_guest_memfd = true;
2433
2434 /* default init/start/finish params for kvm */
2435 sev_snp_guest->kvm_start_conf.policy = DEFAULT_SEV_SNP_POLICY;
2436 }
2437
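/*
 * Illustrative command line for an SEV-SNP guest (values are examples
 * only; policy defaults to DEFAULT_SEV_SNP_POLICY when omitted):
 *
 *   qemu-system-x86_64 \
 *     -machine q35,confidential-guest-support=snp0 \
 *     -object sev-snp-guest,id=snp0,cbitpos=51,reduced-phys-bits=1
 */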
2438 /* guest info specific to sev-snp */
2439 static const TypeInfo sev_snp_guest_info = {
2440 .parent = TYPE_SEV_COMMON,
2441 .name = TYPE_SEV_SNP_GUEST,
2442 .instance_size = sizeof(SevSnpGuestState),
2443 .class_init = sev_snp_guest_class_init,
2444 .instance_init = sev_snp_guest_instance_init,
2445 };
2446
2447 static void
2448 sev_register_types(void)
2449 {
2450 type_register_static(&sev_common_info);
2451 type_register_static(&sev_guest_info);
2452 type_register_static(&sev_snp_guest_info);
2453 }
2454
2455 type_init(sev_register_types);
2456