xref: /qemu/target/i386/kvm/tdx.c (revision ae60ff4e9f9e5790f79abf866ec67270c28ca477)
1 /*
2  * QEMU TDX support
3  *
4  * Copyright (c) 2025 Intel Corporation
5  *
6  * Author:
7  *      Xiaoyao Li <xiaoyao.li@intel.com>
8  *
9  * SPDX-License-Identifier: GPL-2.0-or-later
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/error-report.h"
14 #include "qemu/base64.h"
15 #include "qemu/mmap-alloc.h"
16 #include "qapi/error.h"
17 #include "qom/object_interfaces.h"
18 #include "crypto/hash.h"
19 #include "system/system.h"
20 #include "system/ramblock.h"
21 
22 #include "hw/i386/e820_memory_layout.h"
23 #include "hw/i386/tdvf.h"
24 #include "hw/i386/x86.h"
25 #include "hw/i386/tdvf-hob.h"
26 #include "kvm_i386.h"
27 #include "tdx.h"
28 
/* KVM_SET_TSC_KHZ bounds enforced for TDX guests: 100 MHz .. 10 GHz. */
#define TDX_MIN_TSC_FREQUENCY_KHZ   (100 * 1000)
#define TDX_MAX_TSC_FREQUENCY_KHZ   (10 * 1000 * 1000)

/* TD attribute bits passed to KVM_TDX_INIT_VM via kvm_tdx_init_vm.attributes */
#define TDX_TD_ATTRIBUTES_DEBUG             BIT_ULL(0)
#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE   BIT_ULL(28)
#define TDX_TD_ATTRIBUTES_PKS               BIT_ULL(30)
#define TDX_TD_ATTRIBUTES_PERFMON           BIT_ULL(63)

/* Attribute bits QEMU knows how to configure; anything else is rejected. */
#define TDX_SUPPORTED_TD_ATTRS  (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\
                                 TDX_TD_ATTRIBUTES_PKS | \
                                 TDX_TD_ATTRIBUTES_PERFMON)

/* Singleton TdxGuest instance; set once in tdx_kvm_init(). */
static TdxGuest *tdx_guest;

/* Result of KVM_TDX_CAPABILITIES, fetched once and cached for the VM. */
static struct kvm_tdx_capabilities *tdx_caps;
44 
45 /* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
46 bool is_tdx_vm(void)
47 {
48     return !!tdx_guest;
49 }
50 
/* Scope of a TDX command: issued on the VM fd or on a vCPU fd. */
enum tdx_ioctl_level {
    TDX_VM_IOCTL,
    TDX_VCPU_IOCTL,
};
55 
56 static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
57                               int cmd_id, __u32 flags, void *data,
58                               Error **errp)
59 {
60     struct kvm_tdx_cmd tdx_cmd = {};
61     int r;
62 
63     const char *tdx_ioctl_name[] = {
64         [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
65         [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
66         [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
67         [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
68         [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
69         [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
70     };
71 
72     tdx_cmd.id = cmd_id;
73     tdx_cmd.flags = flags;
74     tdx_cmd.data = (__u64)(unsigned long)data;
75 
76     switch (level) {
77     case TDX_VM_IOCTL:
78         r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
79         break;
80     case TDX_VCPU_IOCTL:
81         r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
82         break;
83     default:
84         error_setg(errp, "Invalid tdx_ioctl_level %d", level);
85         return -EINVAL;
86     }
87 
88     if (r < 0) {
89         error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
90                          tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
91     }
92     return r;
93 }
94 
/* Convenience wrapper: issue a VM-scope TDX command on the VM fd. */
static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
                               Error **errp)
{
    return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
}
100 
/* Convenience wrapper: issue a vCPU-scope TDX command on @cpu's vCPU fd. */
static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
                                 void *data, Error **errp)
{
    return  tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
}
106 
/*
 * Query KVM_TDX_CAPABILITIES and cache the result in tdx_caps.
 *
 * The number of CPUID entries the capabilities struct carries is not known
 * up front; start with a small guess and, on -E2BIG, double it and retry
 * until KVM accepts the buffer or KVM_MAX_CPUID_ENTRIES is exceeded.
 *
 * Returns 0 on success, a negative error value on failure (*errp set).
 */
static int get_tdx_capabilities(Error **errp)
{
    struct kvm_tdx_capabilities *caps;
    /* 1st generation of TDX reports 6 cpuid configs */
    int nr_cpuid_configs = 6;
    size_t size;
    int r;

    do {
        /* Fresh local error each round so a retried failure is discarded. */
        Error *local_err = NULL;
        size = sizeof(struct kvm_tdx_capabilities) +
                      nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
        caps = g_malloc0(size);
        caps->cpuid.nent = nr_cpuid_configs;

        r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
        if (r == -E2BIG) {
            /* Buffer too small: free it and retry with twice the entries. */
            g_free(caps);
            nr_cpuid_configs *= 2;
            if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
                error_report("KVM TDX seems broken that number of CPUID entries"
                             " in kvm_tdx_capabilities exceeds limit: %d",
                             KVM_MAX_CPUID_ENTRIES);
                error_propagate(errp, local_err);
                return r;
            }
            /* Retrying: drop the -E2BIG error before the next attempt. */
            error_free(local_err);
        } else if (r < 0) {
            g_free(caps);
            error_propagate(errp, local_err);
            return r;
        }
    } while (r == -E2BIG);

    tdx_caps = caps;

    return 0;
}
145 
/*
 * Record the MemoryRegion backing the TDVF flash image.  May only be
 * called once; the region is consumed later by tdx_finalize_vm().
 */
void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
{
    assert(!tdx_guest->tdvf_mr);
    tdx_guest->tdvf_mr = tdvf_mr;
}
151 
/*
 * Return the TDVF section entry describing the TD HOB.  A TDVF image
 * without a TD_HOB section cannot boot a TD, so not finding one is fatal.
 */
static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
{
    TdxFirmwareEntry *entry;

    for_each_tdx_fw_entry(&tdx->tdvf, entry) {
        if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
            return entry;
        }
    }
    error_report("TDVF metadata doesn't specify TD_HOB location.");
    exit(1);
}
164 
165 static void tdx_add_ram_entry(uint64_t address, uint64_t length,
166                               enum TdxRamType type)
167 {
168     uint32_t nr_entries = tdx_guest->nr_ram_entries;
169     tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
170                                      nr_entries + 1);
171 
172     tdx_guest->ram_entries[nr_entries].address = address;
173     tdx_guest->ram_entries[nr_entries].length = length;
174     tdx_guest->ram_entries[nr_entries].type = type;
175     tdx_guest->nr_ram_entries++;
176 }
177 
/*
 * Mark [address, address + length) as TDX_RAM_ADDED.  The range must be
 * fully contained within a single existing RAM entry; that entry is
 * narrowed to the accepted range and any leftover head/tail pieces are
 * re-added as TDX_RAM_UNACCEPTED entries.
 *
 * Returns 0 on success (including when the range was already added),
 * -1 if no single entry fully contains the range.
 */
static int tdx_accept_ram_range(uint64_t address, uint64_t length)
{
    uint64_t head_start, tail_start, head_length, tail_length;
    uint64_t tmp_address, tmp_length;
    TdxRamEntry *e;
    int i = 0;

    /* Find the first entry overlapping the requested range. */
    do {
        if (i == tdx_guest->nr_ram_entries) {
            return -1;
        }

        e = &tdx_guest->ram_entries[i++];
    } while (address + length <= e->address || address >= e->address + e->length);

    /*
     * The to-be-accepted ram range must be fully contained by one
     * RAM entry.
     */
    if (e->address > address ||
        e->address + e->length < address + length) {
        return -1;
    }

    if (e->type == TDX_RAM_ADDED) {
        /* Already accepted: nothing to do. */
        return 0;
    }

    /* Remember the original extent, then shrink e to the accepted range. */
    tmp_address = e->address;
    tmp_length = e->length;

    e->address = address;
    e->length = length;
    e->type = TDX_RAM_ADDED;

    /*
     * Re-add the unaccepted remainders before and after the range.
     * Note: tdx_add_ram_entry() may reallocate the array, so e must not
     * be dereferenced past this point.
     */
    head_length = address - tmp_address;
    if (head_length > 0) {
        head_start = tmp_address;
        tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
    }

    tail_start = address + length;
    if (tail_start < tmp_address + tmp_length) {
        tail_length = tmp_address + tmp_length - tail_start;
        tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
    }

    return 0;
}
227 
228 static int tdx_ram_entry_compare(const void *lhs_, const void* rhs_)
229 {
230     const TdxRamEntry *lhs = lhs_;
231     const TdxRamEntry *rhs = rhs_;
232 
233     if (lhs->address == rhs->address) {
234         return 0;
235     }
236     if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
237         return 1;
238     }
239     return -1;
240 }
241 
242 static void tdx_init_ram_entries(void)
243 {
244     unsigned i, j, nr_e820_entries;
245 
246     nr_e820_entries = e820_get_table(NULL);
247     tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);
248 
249     for (i = 0, j = 0; i < nr_e820_entries; i++) {
250         uint64_t addr, len;
251 
252         if (e820_get_entry(i, E820_RAM, &addr, &len)) {
253             tdx_guest->ram_entries[j].address = addr;
254             tdx_guest->ram_entries[j].length = len;
255             tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
256             j++;
257         }
258     }
259     tdx_guest->nr_ram_entries = j;
260 }
261 
/*
 * Issue KVM_TDX_INIT_VCPU for every vCPU.  The guest-physical address of
 * the TD HOB is passed by value through the ioctl data argument (hence
 * the pointer cast).  Any failure is fatal.
 */
static void tdx_post_init_vcpus(void)
{
    TdxFirmwareEntry *hob;
    CPUState *cpu;

    hob = tdx_get_hob_entry(tdx_guest);
    CPU_FOREACH(cpu) {
        tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)hob->address,
                       &error_fatal);
    }
}
273 
/*
 * machine-init-done notifier: stage the TDVF sections, build the TD HOB,
 * copy everything into guest private memory via KVM_TDX_INIT_MEM_REGION,
 * then seal the TD measurement with KVM_TDX_FINALIZE_VM.  Every failure
 * here is fatal to the VM.
 */
static void tdx_finalize_vm(Notifier *notifier, void *unused)
{
    TdxFirmware *tdvf = &tdx_guest->tdvf;
    TdxFirmwareEntry *entry;
    RAMBlock *ram_block;
    Error *local_err = NULL;
    int r;

    tdx_init_ram_entries();

    /* First pass: attach a host source buffer to every TDVF section. */
    for_each_tdx_fw_entry(tdvf, entry) {
        switch (entry->type) {
        case TDVF_SECTION_TYPE_BFV:
        case TDVF_SECTION_TYPE_CFV:
            /* Firmware volumes: content comes straight from the image. */
            entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
            break;
        case TDVF_SECTION_TYPE_TD_HOB:
        case TDVF_SECTION_TYPE_TEMP_MEM:
            /* Anonymous staging buffers, unmapped again after the copy-in. */
            entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
                                           qemu_real_host_page_size(), 0, 0);
            if (entry->mem_ptr == MAP_FAILED) {
                error_report("Failed to mmap memory for TDVF section %d",
                             entry->type);
                exit(1);
            }
            if (tdx_accept_ram_range(entry->address, entry->size)) {
                error_report("Failed to accept memory for TDVF section %d",
                             entry->type);
                qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
                exit(1);
            }
            break;
        default:
            error_report("Unsupported TDVF section %d", entry->type);
            exit(1);
        }
    }

    /* The HOB builder expects the RAM entries sorted by address. */
    qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
          sizeof(TdxRamEntry), &tdx_ram_entry_compare);

    tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));

    tdx_post_init_vcpus();

    /* Second pass: copy each section into private memory. */
    for_each_tdx_fw_entry(tdvf, entry) {
        struct kvm_tdx_init_mem_region region;
        uint32_t flags;

        region = (struct kvm_tdx_init_mem_region) {
            .source_addr = (uint64_t)entry->mem_ptr,
            .gpa = entry->address,
            .nr_pages = entry->size >> 12,   /* section size in 4KiB pages */
        };

        /* Only MR_EXTEND sections contribute to the TD measurement. */
        flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
                KVM_TDX_MEASURE_MEMORY_REGION : 0;

        /* Retry on -EAGAIN/-EINTR, dropping any stale error each round. */
        do {
            error_free(local_err);
            local_err = NULL;
            r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
                               &region, &local_err);
        } while (r == -EAGAIN || r == -EINTR);
        if (r < 0) {
            error_report_err(local_err);
            exit(1);
        }

        /* Staging buffers are no longer needed once copied in. */
        if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
            entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
            qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
            entry->mem_ptr = NULL;
        }
    }

    /*
     * TDVF image has been copied into private region above via
     * KVM_TDX_INIT_MEM_REGION. It becomes useless.
     */
    ram_block = tdx_guest->tdvf_mr->ram_block;
    ram_block_discard_range(ram_block, 0, ram_block->max_length);

    tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
    CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
}
360 
/* Fires tdx_finalize_vm() after machine init completes. */
static Notifier tdx_machine_done_notify = {
    .notify = tdx_finalize_vm,
};
364 
365 static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
366 {
367     TdxGuest *tdx = TDX_GUEST(cgs);
368     int r = 0;
369 
370     kvm_mark_guest_state_protected();
371 
372     if (!tdx_caps) {
373         r = get_tdx_capabilities(errp);
374         if (r) {
375             return r;
376         }
377     }
378 
379     qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);
380 
381     tdx_guest = tdx;
382     return 0;
383 }
384 
/* Report the KVM VM type to create for a TDX guest. */
static int tdx_kvm_type(X86ConfidentialGuest *cg)
{
    /* Do the object check */
    TDX_GUEST(cg);

    return KVM_X86_TDX_VM;
}
392 
393 static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
394 {
395     if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
396         error_setg(errp, "Invalid attributes 0x%lx for TDX VM "
397                    "(KVM supported: 0x%llx)", tdx->attributes,
398                    tdx_caps->supported_attrs);
399         return -1;
400     }
401 
402     if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
403         error_setg(errp, "Some QEMU unsupported TD attribute bits being "
404                     "requested: 0x%lx (QEMU supported: 0x%llx)",
405                     tdx->attributes, TDX_SUPPORTED_TD_ATTRS);
406         return -1;
407     }
408 
409     return 0;
410 }
411 
412 static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
413 {
414     CPUX86State *env = &x86cpu->env;
415 
416     tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
417                              TDX_TD_ATTRIBUTES_PKS : 0;
418     tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;
419 
420     return tdx_validate_attributes(tdx_guest, errp);
421 }
422 
423 static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
424 {
425     CPUX86State *env = &x86cpu->env;
426     uint64_t xfam;
427 
428     xfam = env->features[FEAT_XSAVE_XCR0_LO] |
429            env->features[FEAT_XSAVE_XCR0_HI] |
430            env->features[FEAT_XSAVE_XSS_LO] |
431            env->features[FEAT_XSAVE_XSS_HI];
432 
433     if (xfam & ~tdx_caps->supported_xfam) {
434         error_setg(errp, "Invalid XFAM 0x%lx for TDX VM (supported: 0x%llx))",
435                    xfam, tdx_caps->supported_xfam);
436         return -1;
437     }
438 
439     tdx_guest->xfam = xfam;
440     return 0;
441 }
442 
443 static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
444 {
445     int i, dest_cnt = 0;
446     struct kvm_cpuid_entry2 *src, *dest, *conf;
447 
448     for (i = 0; i < cpuids->nent; i++) {
449         src = cpuids->entries + i;
450         conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
451         if (!conf) {
452             continue;
453         }
454         dest = cpuids->entries + dest_cnt;
455 
456         dest->function = src->function;
457         dest->index = src->index;
458         dest->flags = src->flags;
459         dest->eax = src->eax & conf->eax;
460         dest->ebx = src->ebx & conf->ebx;
461         dest->ecx = src->ecx & conf->ecx;
462         dest->edx = src->edx & conf->edx;
463 
464         dest_cnt++;
465     }
466     cpuids->nent = dest_cnt++;
467 }
468 
469 int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
470 {
471     X86CPU *x86cpu = X86_CPU(cpu);
472     CPUX86State *env = &x86cpu->env;
473     g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
474     Error *local_err = NULL;
475     size_t data_len;
476     int retry = 10000;
477     int r = 0;
478 
479     QEMU_LOCK_GUARD(&tdx_guest->lock);
480     if (tdx_guest->initialized) {
481         return r;
482     }
483 
484     init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
485                         sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
486 
487     if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
488         error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
489         return -EOPNOTSUPP;
490     }
491 
492     r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
493                           0, TDX_APIC_BUS_CYCLES_NS);
494     if (r < 0) {
495         error_setg_errno(errp, -r,
496                          "Unable to set core crystal clock frequency to 25MHz");
497         return r;
498     }
499 
500     if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
501                          env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
502         error_setg(errp, "Invalid TSC %ld KHz, must specify cpu_frequency "
503                          "between [%d, %d] kHz", env->tsc_khz,
504                          TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
505        return -EINVAL;
506     }
507 
508     if (env->tsc_khz % (25 * 1000)) {
509         error_setg(errp, "Invalid TSC %ld KHz, it must be multiple of 25MHz",
510                    env->tsc_khz);
511         return -EINVAL;
512     }
513 
514     /* it's safe even env->tsc_khz is 0. KVM uses host's tsc_khz in this case */
515     r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
516     if (r < 0) {
517         error_setg_errno(errp, -r, "Unable to set TSC frequency to %ld kHz",
518                          env->tsc_khz);
519         return r;
520     }
521 
522     if (tdx_guest->mrconfigid) {
523         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
524                               strlen(tdx_guest->mrconfigid), &data_len, errp);
525         if (!data) {
526             return -1;
527         }
528         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
529             error_setg(errp, "TDX: failed to decode mrconfigid");
530             return -1;
531         }
532         memcpy(init_vm->mrconfigid, data, data_len);
533     }
534 
535     if (tdx_guest->mrowner) {
536         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
537                               strlen(tdx_guest->mrowner), &data_len, errp);
538         if (!data) {
539             return -1;
540         }
541         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
542             error_setg(errp, "TDX: failed to decode mrowner");
543             return -1;
544         }
545         memcpy(init_vm->mrowner, data, data_len);
546     }
547 
548     if (tdx_guest->mrownerconfig) {
549         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
550                             strlen(tdx_guest->mrownerconfig), &data_len, errp);
551         if (!data) {
552             return -1;
553         }
554         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
555             error_setg(errp, "TDX: failed to decode mrownerconfig");
556             return -1;
557         }
558         memcpy(init_vm->mrownerconfig, data, data_len);
559     }
560 
561     r = setup_td_guest_attributes(x86cpu, errp);
562     if (r) {
563         return r;
564     }
565 
566     r = setup_td_xfam(x86cpu, errp);
567     if (r) {
568         return r;
569     }
570 
571     init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
572     tdx_filter_cpuid(&init_vm->cpuid);
573 
574     init_vm->attributes = tdx_guest->attributes;
575     init_vm->xfam = tdx_guest->xfam;
576 
577     /*
578      * KVM_TDX_INIT_VM gets -EAGAIN when KVM side SEAMCALL(TDH_MNG_CREATE)
579      * gets TDX_RND_NO_ENTROPY due to Random number generation (e.g., RDRAND or
580      * RDSEED) is busy.
581      *
582      * Retry for the case.
583      */
584     do {
585         error_free(local_err);
586         local_err = NULL;
587         r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
588     } while (r == -EAGAIN && --retry);
589 
590     if (r < 0) {
591         if (!retry) {
592             error_append_hint(&local_err, "Hardware RNG (Random Number "
593             "Generator) is busy occupied by someone (via RDRAND/RDSEED) "
594             "maliciously, which leads to KVM_TDX_INIT_VM keeping failure "
595             "due to lack of entropy.\n");
596         }
597         error_propagate(errp, local_err);
598         return r;
599     }
600 
601     tdx_guest->initialized = true;
602 
603     return 0;
604 }
605 
/* Parse TDVF metadata out of the flash image into tdx_guest->tdvf. */
int tdx_parse_tdvf(void *flash_ptr, int size)
{
    return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
}
610 
611 static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
612 {
613     TdxGuest *tdx = TDX_GUEST(obj);
614 
615     return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
616 }
617 
618 static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
619 {
620     TdxGuest *tdx = TDX_GUEST(obj);
621 
622     if (value) {
623         tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
624     } else {
625         tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
626     }
627 }
628 
629 static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
630 {
631     TdxGuest *tdx = TDX_GUEST(obj);
632 
633     return g_strdup(tdx->mrconfigid);
634 }
635 
636 static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
637 {
638     TdxGuest *tdx = TDX_GUEST(obj);
639 
640     g_free(tdx->mrconfigid);
641     tdx->mrconfigid = g_strdup(value);
642 }
643 
644 static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
645 {
646     TdxGuest *tdx = TDX_GUEST(obj);
647 
648     return g_strdup(tdx->mrowner);
649 }
650 
651 static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
652 {
653     TdxGuest *tdx = TDX_GUEST(obj);
654 
655     g_free(tdx->mrowner);
656     tdx->mrowner = g_strdup(value);
657 }
658 
659 static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
660 {
661     TdxGuest *tdx = TDX_GUEST(obj);
662 
663     return g_strdup(tdx->mrownerconfig);
664 }
665 
666 static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
667 {
668     TdxGuest *tdx = TDX_GUEST(obj);
669 
670     g_free(tdx->mrownerconfig);
671     tdx->mrownerconfig = g_strdup(value);
672 }
673 
/* tdx-guest QOM type: user-creatable x86 confidential-guest-support object */
OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
                                   tdx_guest,
                                   TDX_GUEST,
                                   X86_CONFIDENTIAL_GUEST,
                                   { TYPE_USER_CREATABLE },
                                   { NULL })
681 
682 static void tdx_guest_init(Object *obj)
683 {
684     ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
685     TdxGuest *tdx = TDX_GUEST(obj);
686 
687     qemu_mutex_init(&tdx->lock);
688 
689     cgs->require_guest_memfd = true;
690     tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
691 
692     object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
693                                    OBJ_PROP_FLAG_READWRITE);
694     object_property_add_bool(obj, "sept-ve-disable",
695                              tdx_guest_get_sept_ve_disable,
696                              tdx_guest_set_sept_ve_disable);
697     object_property_add_str(obj, "mrconfigid",
698                             tdx_guest_get_mrconfigid,
699                             tdx_guest_set_mrconfigid);
700     object_property_add_str(obj, "mrowner",
701                             tdx_guest_get_mrowner, tdx_guest_set_mrowner);
702     object_property_add_str(obj, "mrownerconfig",
703                             tdx_guest_get_mrownerconfig,
704                             tdx_guest_set_mrownerconfig);
705 }
706 
707 static void tdx_guest_finalize(Object *obj)
708 {
709 }
710 
711 static void tdx_guest_class_init(ObjectClass *oc, const void *data)
712 {
713     ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
714     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
715 
716     klass->kvm_init = tdx_kvm_init;
717     x86_klass->kvm_type = tdx_kvm_type;
718 }
719