xref: /qemu/target/i386/kvm/tdx.c (revision 9002494f80b751a7655045c5f46bf90bc1d3bbd0)
1 /*
2  * QEMU TDX support
3  *
4  * Copyright (c) 2025 Intel Corporation
5  *
6  * Author:
7  *      Xiaoyao Li <xiaoyao.li@intel.com>
8  *
9  * SPDX-License-Identifier: GPL-2.0-or-later
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/error-report.h"
14 #include "qemu/base64.h"
15 #include "qemu/mmap-alloc.h"
16 #include "qapi/error.h"
17 #include "qom/object_interfaces.h"
18 #include "crypto/hash.h"
19 #include "system/runstate.h"
20 #include "system/system.h"
21 #include "system/ramblock.h"
22 
23 #include <linux/kvm_para.h>
24 
25 #include "hw/i386/e820_memory_layout.h"
26 #include "hw/i386/tdvf.h"
27 #include "hw/i386/x86.h"
28 #include "hw/i386/tdvf-hob.h"
29 #include "kvm_i386.h"
30 #include "tdx.h"
31 
/* KVM accepts TD guest TSC frequencies only within [100 MHz, 10 GHz]. */
#define TDX_MIN_TSC_FREQUENCY_KHZ   (100 * 1000)
#define TDX_MAX_TSC_FREQUENCY_KHZ   (10 * 1000 * 1000)

/* TD attribute bits (positions per the TDX module ABI). */
#define TDX_TD_ATTRIBUTES_DEBUG             BIT_ULL(0)
#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE   BIT_ULL(28)
#define TDX_TD_ATTRIBUTES_PKS               BIT_ULL(30)
#define TDX_TD_ATTRIBUTES_PERFMON           BIT_ULL(63)

/* Attribute bits QEMU supports; others are rejected in tdx_validate_attributes(). */
#define TDX_SUPPORTED_TD_ATTRS  (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\
                                 TDX_TD_ATTRIBUTES_PKS | \
                                 TDX_TD_ATTRIBUTES_PERFMON)
43 
/* The single TdxGuest instance; set at the end of tdx_kvm_init(). */
static TdxGuest *tdx_guest;

/* Cached KVM_TDX_CAPABILITIES result; populated once by get_tdx_capabilities(). */
static struct kvm_tdx_capabilities *tdx_caps;
47 
48 /* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
49 bool is_tdx_vm(void)
50 {
51     return !!tdx_guest;
52 }
53 
/* Scope of a kvm_tdx_cmd: issued on the VM fd or on a single vCPU fd. */
enum tdx_ioctl_level {
    TDX_VM_IOCTL,
    TDX_VCPU_IOCTL,
};
58 
59 static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
60                               int cmd_id, __u32 flags, void *data,
61                               Error **errp)
62 {
63     struct kvm_tdx_cmd tdx_cmd = {};
64     int r;
65 
66     const char *tdx_ioctl_name[] = {
67         [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
68         [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
69         [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
70         [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
71         [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
72         [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
73     };
74 
75     tdx_cmd.id = cmd_id;
76     tdx_cmd.flags = flags;
77     tdx_cmd.data = (__u64)(unsigned long)data;
78 
79     switch (level) {
80     case TDX_VM_IOCTL:
81         r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
82         break;
83     case TDX_VCPU_IOCTL:
84         r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
85         break;
86     default:
87         error_setg(errp, "Invalid tdx_ioctl_level %d", level);
88         return -EINVAL;
89     }
90 
91     if (r < 0) {
92         error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
93                          tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
94     }
95     return r;
96 }
97 
/* Issue a TDX command at VM scope; see tdx_ioctl_internal(). */
static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
                               Error **errp)
{
    return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
}
103 
/* Issue a TDX command on @cpu's vCPU fd; see tdx_ioctl_internal(). */
static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
                                 void *data, Error **errp)
{
    return  tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
}
109 
/*
 * Fetch kvm_tdx_capabilities from KVM and cache it in tdx_caps.
 *
 * The number of CPUID config entries is not known up front: start with 6
 * and double the buffer whenever KVM answers -E2BIG, up to
 * KVM_MAX_CPUID_ENTRIES.  Returns 0 on success, negative errno on
 * failure with @errp set.
 */
static int get_tdx_capabilities(Error **errp)
{
    struct kvm_tdx_capabilities *caps;
    /* 1st generation of TDX reports 6 cpuid configs */
    int nr_cpuid_configs = 6;
    size_t size;
    int r;

    do {
        Error *local_err = NULL;
        size = sizeof(struct kvm_tdx_capabilities) +
                      nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
        caps = g_malloc0(size);
        caps->cpuid.nent = nr_cpuid_configs;

        r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
        if (r == -E2BIG) {
            /* Buffer too small: free it and retry with twice the entries. */
            g_free(caps);
            nr_cpuid_configs *= 2;
            if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
                /* Sanity cap so a misbehaving KVM cannot loop us forever. */
                error_report("KVM TDX seems broken that number of CPUID entries"
                             " in kvm_tdx_capabilities exceeds limit: %d",
                             KVM_MAX_CPUID_ENTRIES);
                error_propagate(errp, local_err);
                return r;
            }
            /* Discard the -E2BIG error; the retry may still succeed. */
            error_free(local_err);
        } else if (r < 0) {
            g_free(caps);
            error_propagate(errp, local_err);
            return r;
        }
    } while (r == -E2BIG);

    /* Ownership of the successfully filled buffer moves to the cache. */
    tdx_caps = caps;

    return 0;
}
148 
/* Record the MemoryRegion backing the TDVF flash image (set exactly once). */
void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
{
    assert(!tdx_guest->tdvf_mr);
    tdx_guest->tdvf_mr = tdvf_mr;
}
154 
/*
 * Return the TDVF section entry of type TD_HOB.  The HOB section is
 * mandatory; exit fatally if the firmware metadata does not provide one.
 */
static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
{
    TdxFirmwareEntry *entry;

    for_each_tdx_fw_entry(&tdx->tdvf, entry) {
        if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
            return entry;
        }
    }
    error_report("TDVF metadata doesn't specify TD_HOB location.");
    exit(1);
}
167 
168 static void tdx_add_ram_entry(uint64_t address, uint64_t length,
169                               enum TdxRamType type)
170 {
171     uint32_t nr_entries = tdx_guest->nr_ram_entries;
172     tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
173                                      nr_entries + 1);
174 
175     tdx_guest->ram_entries[nr_entries].address = address;
176     tdx_guest->ram_entries[nr_entries].length = length;
177     tdx_guest->ram_entries[nr_entries].type = type;
178     tdx_guest->nr_ram_entries++;
179 }
180 
/*
 * Mark [address, address + length) as TDX_RAM_ADDED in the RAM table.
 *
 * The range must be fully contained by a single existing entry.  That
 * entry is shrunk in place to exactly the accepted span, and new
 * TDX_RAM_UNACCEPTED entries are appended for any head/tail remainder.
 *
 * Returns 0 on success (including the range being already added),
 * -1 if no single entry fully contains the range.
 */
static int tdx_accept_ram_range(uint64_t address, uint64_t length)
{
    uint64_t head_start, tail_start, head_length, tail_length;
    uint64_t tmp_address, tmp_length;
    TdxRamEntry *e;
    int i = 0;

    /* Linear scan for the first entry overlapping the requested range. */
    do {
        if (i == tdx_guest->nr_ram_entries) {
            return -1;
        }

        e = &tdx_guest->ram_entries[i++];
    } while (address + length <= e->address || address >= e->address + e->length);

    /*
     * The to-be-accepted ram range must be fully contained by one
     * RAM entry.
     */
    if (e->address > address ||
        e->address + e->length < address + length) {
        return -1;
    }

    if (e->type == TDX_RAM_ADDED) {
        /* Already accepted; nothing to do. */
        return 0;
    }

    /* Remember the original extent before shrinking the entry. */
    tmp_address = e->address;
    tmp_length = e->length;

    e->address = address;
    e->length = length;
    e->type = TDX_RAM_ADDED;

    /*
     * Re-add any unaccepted remainder before the accepted range.  Note
     * tdx_add_ram_entry() may reallocate the table; 'e' is not
     * dereferenced after this point.
     */
    head_length = address - tmp_address;
    if (head_length > 0) {
        head_start = tmp_address;
        tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
    }

    /* ... and any unaccepted remainder after it. */
    tail_start = address + length;
    if (tail_start < tmp_address + tmp_length) {
        tail_length = tmp_address + tmp_length - tail_start;
        tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
    }

    return 0;
}
230 
231 static int tdx_ram_entry_compare(const void *lhs_, const void* rhs_)
232 {
233     const TdxRamEntry *lhs = lhs_;
234     const TdxRamEntry *rhs = rhs_;
235 
236     if (lhs->address == rhs->address) {
237         return 0;
238     }
239     if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
240         return 1;
241     }
242     return -1;
243 }
244 
245 static void tdx_init_ram_entries(void)
246 {
247     unsigned i, j, nr_e820_entries;
248 
249     nr_e820_entries = e820_get_table(NULL);
250     tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);
251 
252     for (i = 0, j = 0; i < nr_e820_entries; i++) {
253         uint64_t addr, len;
254 
255         if (e820_get_entry(i, E820_RAM, &addr, &len)) {
256             tdx_guest->ram_entries[j].address = addr;
257             tdx_guest->ram_entries[j].length = len;
258             tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
259             j++;
260         }
261     }
262     tdx_guest->nr_ram_entries = j;
263 }
264 
265 static void tdx_post_init_vcpus(void)
266 {
267     TdxFirmwareEntry *hob;
268     CPUState *cpu;
269 
270     hob = tdx_get_hob_entry(tdx_guest);
271     CPU_FOREACH(cpu) {
272         tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)hob->address,
273                        &error_fatal);
274     }
275 }
276 
/*
 * Machine-init-done notifier: populate and seal the initial TD state.
 *
 * Resolves a host pointer for each TDVF section, builds the TD HOB,
 * initializes vCPUs, uploads each section into private guest memory via
 * KVM_TDX_INIT_MEM_REGION (measuring MR_EXTEND sections), then issues
 * KVM_TDX_FINALIZE_VM and marks the confidential guest ready.
 * All failures here are fatal (exit/error_fatal).
 */
static void tdx_finalize_vm(Notifier *notifier, void *unused)
{
    TdxFirmware *tdvf = &tdx_guest->tdvf;
    TdxFirmwareEntry *entry;
    RAMBlock *ram_block;
    Error *local_err = NULL;
    int r;

    tdx_init_ram_entries();

    for_each_tdx_fw_entry(tdvf, entry) {
        switch (entry->type) {
        case TDVF_SECTION_TYPE_BFV:
        case TDVF_SECTION_TYPE_CFV:
            /* Firmware volumes come straight from the flash image. */
            entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
            break;
        case TDVF_SECTION_TYPE_TD_HOB:
        case TDVF_SECTION_TYPE_TEMP_MEM:
            /* HOB/temp sections use scratch pages, released after upload. */
            entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
                                           qemu_real_host_page_size(), 0, 0);
            if (entry->mem_ptr == MAP_FAILED) {
                error_report("Failed to mmap memory for TDVF section %d",
                             entry->type);
                exit(1);
            }
            if (tdx_accept_ram_range(entry->address, entry->size)) {
                error_report("Failed to accept memory for TDVF section %d",
                             entry->type);
                qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
                exit(1);
            }
            break;
        default:
            error_report("Unsupported TDVF section %d", entry->type);
            exit(1);
        }
    }

    /* The HOB builder below walks the entries in address order. */
    qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
          sizeof(TdxRamEntry), &tdx_ram_entry_compare);

    tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));

    tdx_post_init_vcpus();

    for_each_tdx_fw_entry(tdvf, entry) {
        struct kvm_tdx_init_mem_region region;
        uint32_t flags;

        region = (struct kvm_tdx_init_mem_region) {
            .source_addr = (uint64_t)entry->mem_ptr,
            .gpa = entry->address,
            .nr_pages = entry->size >> 12,  /* size in 4KiB pages */
        };

        /* MR_EXTEND sections contribute their contents to the measurement. */
        flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
                KVM_TDX_MEASURE_MEMORY_REGION : 0;

        /* Retry on transient failures, dropping the stale error each time. */
        do {
            error_free(local_err);
            local_err = NULL;
            r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
                               &region, &local_err);
        } while (r == -EAGAIN || r == -EINTR);
        if (r < 0) {
            error_report_err(local_err);
            exit(1);
        }

        if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
            entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
            /* Contents now live in private memory; drop the scratch pages. */
            qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
            entry->mem_ptr = NULL;
        }
    }

    /*
     * TDVF image has been copied into private region above via
     * KVM_MEMORY_MAPPING. It becomes useless.
     */
    ram_block = tdx_guest->tdvf_mr->ram_block;
    ram_block_discard_range(ram_block, 0, ram_block->max_length);

    tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
    CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
}
363 
/* Fires tdx_finalize_vm() after machine init; registered in tdx_kvm_init(). */
static Notifier tdx_machine_done_notify = {
    .notify = tdx_finalize_vm,
};
367 
368 static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
369 {
370     TdxGuest *tdx = TDX_GUEST(cgs);
371     int r = 0;
372 
373     kvm_mark_guest_state_protected();
374 
375     if (!tdx_caps) {
376         r = get_tdx_capabilities(errp);
377         if (r) {
378             return r;
379         }
380     }
381 
382     /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
383     if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
384         return -EOPNOTSUPP;
385     }
386 
387     qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);
388 
389     tdx_guest = tdx;
390     return 0;
391 }
392 
393 static int tdx_kvm_type(X86ConfidentialGuest *cg)
394 {
395     /* Do the object check */
396     TDX_GUEST(cg);
397 
398     return KVM_X86_TDX_VM;
399 }
400 
401 static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu)
402 {
403     X86CPU *x86cpu = X86_CPU(cpu);
404 
405     object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort);
406 
407     x86cpu->enable_cpuid_0x1f = true;
408 }
409 
410 static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
411 {
412     if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
413         error_setg(errp, "Invalid attributes 0x%lx for TDX VM "
414                    "(KVM supported: 0x%llx)", tdx->attributes,
415                    tdx_caps->supported_attrs);
416         return -1;
417     }
418 
419     if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
420         error_setg(errp, "Some QEMU unsupported TD attribute bits being "
421                     "requested: 0x%lx (QEMU supported: 0x%llx)",
422                     tdx->attributes, TDX_SUPPORTED_TD_ATTRS);
423         return -1;
424     }
425 
426     return 0;
427 }
428 
429 static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
430 {
431     CPUX86State *env = &x86cpu->env;
432 
433     tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
434                              TDX_TD_ATTRIBUTES_PKS : 0;
435     tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;
436 
437     return tdx_validate_attributes(tdx_guest, errp);
438 }
439 
440 static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
441 {
442     CPUX86State *env = &x86cpu->env;
443     uint64_t xfam;
444 
445     xfam = env->features[FEAT_XSAVE_XCR0_LO] |
446            env->features[FEAT_XSAVE_XCR0_HI] |
447            env->features[FEAT_XSAVE_XSS_LO] |
448            env->features[FEAT_XSAVE_XSS_HI];
449 
450     if (xfam & ~tdx_caps->supported_xfam) {
451         error_setg(errp, "Invalid XFAM 0x%lx for TDX VM (supported: 0x%llx))",
452                    xfam, tdx_caps->supported_xfam);
453         return -1;
454     }
455 
456     tdx_guest->xfam = xfam;
457     return 0;
458 }
459 
460 static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
461 {
462     int i, dest_cnt = 0;
463     struct kvm_cpuid_entry2 *src, *dest, *conf;
464 
465     for (i = 0; i < cpuids->nent; i++) {
466         src = cpuids->entries + i;
467         conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
468         if (!conf) {
469             continue;
470         }
471         dest = cpuids->entries + dest_cnt;
472 
473         dest->function = src->function;
474         dest->index = src->index;
475         dest->flags = src->flags;
476         dest->eax = src->eax & conf->eax;
477         dest->ebx = src->ebx & conf->ebx;
478         dest->ecx = src->ecx & conf->ecx;
479         dest->edx = src->edx & conf->edx;
480 
481         dest_cnt++;
482     }
483     cpuids->nent = dest_cnt++;
484 }
485 
486 int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
487 {
488     X86CPU *x86cpu = X86_CPU(cpu);
489     CPUX86State *env = &x86cpu->env;
490     g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
491     Error *local_err = NULL;
492     size_t data_len;
493     int retry = 10000;
494     int r = 0;
495 
496     QEMU_LOCK_GUARD(&tdx_guest->lock);
497     if (tdx_guest->initialized) {
498         return r;
499     }
500 
501     init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
502                         sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
503 
504     if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
505         error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
506         return -EOPNOTSUPP;
507     }
508 
509     r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
510                           0, TDX_APIC_BUS_CYCLES_NS);
511     if (r < 0) {
512         error_setg_errno(errp, -r,
513                          "Unable to set core crystal clock frequency to 25MHz");
514         return r;
515     }
516 
517     if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
518                          env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
519         error_setg(errp, "Invalid TSC %ld KHz, must specify cpu_frequency "
520                          "between [%d, %d] kHz", env->tsc_khz,
521                          TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
522        return -EINVAL;
523     }
524 
525     if (env->tsc_khz % (25 * 1000)) {
526         error_setg(errp, "Invalid TSC %ld KHz, it must be multiple of 25MHz",
527                    env->tsc_khz);
528         return -EINVAL;
529     }
530 
531     /* it's safe even env->tsc_khz is 0. KVM uses host's tsc_khz in this case */
532     r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
533     if (r < 0) {
534         error_setg_errno(errp, -r, "Unable to set TSC frequency to %ld kHz",
535                          env->tsc_khz);
536         return r;
537     }
538 
539     if (tdx_guest->mrconfigid) {
540         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
541                               strlen(tdx_guest->mrconfigid), &data_len, errp);
542         if (!data) {
543             return -1;
544         }
545         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
546             error_setg(errp, "TDX: failed to decode mrconfigid");
547             return -1;
548         }
549         memcpy(init_vm->mrconfigid, data, data_len);
550     }
551 
552     if (tdx_guest->mrowner) {
553         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
554                               strlen(tdx_guest->mrowner), &data_len, errp);
555         if (!data) {
556             return -1;
557         }
558         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
559             error_setg(errp, "TDX: failed to decode mrowner");
560             return -1;
561         }
562         memcpy(init_vm->mrowner, data, data_len);
563     }
564 
565     if (tdx_guest->mrownerconfig) {
566         g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
567                             strlen(tdx_guest->mrownerconfig), &data_len, errp);
568         if (!data) {
569             return -1;
570         }
571         if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
572             error_setg(errp, "TDX: failed to decode mrownerconfig");
573             return -1;
574         }
575         memcpy(init_vm->mrownerconfig, data, data_len);
576     }
577 
578     r = setup_td_guest_attributes(x86cpu, errp);
579     if (r) {
580         return r;
581     }
582 
583     r = setup_td_xfam(x86cpu, errp);
584     if (r) {
585         return r;
586     }
587 
588     init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
589     tdx_filter_cpuid(&init_vm->cpuid);
590 
591     init_vm->attributes = tdx_guest->attributes;
592     init_vm->xfam = tdx_guest->xfam;
593 
594     /*
595      * KVM_TDX_INIT_VM gets -EAGAIN when KVM side SEAMCALL(TDH_MNG_CREATE)
596      * gets TDX_RND_NO_ENTROPY due to Random number generation (e.g., RDRAND or
597      * RDSEED) is busy.
598      *
599      * Retry for the case.
600      */
601     do {
602         error_free(local_err);
603         local_err = NULL;
604         r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
605     } while (r == -EAGAIN && --retry);
606 
607     if (r < 0) {
608         if (!retry) {
609             error_append_hint(&local_err, "Hardware RNG (Random Number "
610             "Generator) is busy occupied by someone (via RDRAND/RDSEED) "
611             "maliciously, which leads to KVM_TDX_INIT_VM keeping failure "
612             "due to lack of entropy.\n");
613         }
614         error_propagate(errp, local_err);
615         return r;
616     }
617 
618     tdx_guest->initialized = true;
619 
620     return 0;
621 }
622 
/* Parse TDVF metadata from the flash image into tdx_guest->tdvf. */
int tdx_parse_tdvf(void *flash_ptr, int size)
{
    return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
}
627 
628 static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code,
629                                         char *message, uint64_t gpa)
630 {
631     GuestPanicInformation *panic_info;
632 
633     panic_info = g_new0(GuestPanicInformation, 1);
634     panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX;
635     panic_info->u.tdx.error_code = (uint32_t) error_code;
636     panic_info->u.tdx.message = message;
637     panic_info->u.tdx.gpa = gpa;
638 
639     qemu_system_guest_panicked(panic_info);
640 }
641 
/*
 * Only 8 registers can contain valid ASCII byte stream to form the fatal
 * message, and their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX.
 * 8 registers x 8 bytes each bounds the message at 64 bytes.
 */
#define TDX_FATAL_MESSAGE_MAX        64

/* Error-code bit 63: R13 carries a valid guest physical address. */
#define TDX_REPORT_FATAL_ERROR_GPA_VALID    BIT_ULL(63)
650 int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
651 {
652     uint64_t error_code = run->system_event.data[R_R12];
653     uint64_t reg_mask = run->system_event.data[R_ECX];
654     char *message = NULL;
655     uint64_t *tmp;
656     uint64_t gpa = -1ull;
657 
658     if (error_code & 0xffff) {
659         error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%lx",
660                      error_code);
661         return -1;
662     }
663 
664     if (reg_mask) {
665         message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1);
666         tmp = (uint64_t *)message;
667 
668 #define COPY_REG(REG)                               \
669     do {                                            \
670         if (reg_mask & BIT_ULL(REG)) {              \
671             *(tmp++) = run->system_event.data[REG]; \
672         }                                           \
673     } while (0)
674 
675         COPY_REG(R_R14);
676         COPY_REG(R_R15);
677         COPY_REG(R_EBX);
678         COPY_REG(R_EDI);
679         COPY_REG(R_ESI);
680         COPY_REG(R_R8);
681         COPY_REG(R_R9);
682         COPY_REG(R_EDX);
683         *((char *)tmp) = '\0';
684     }
685 #undef COPY_REG
686 
687     if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) {
688         gpa = run->system_event.data[R_R13];
689     }
690 
691     tdx_panicked_on_fatal_error(cpu, error_code, message, gpa);
692 
693     return -1;
694 }
695 
696 static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
697 {
698     TdxGuest *tdx = TDX_GUEST(obj);
699 
700     return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
701 }
702 
703 static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
704 {
705     TdxGuest *tdx = TDX_GUEST(obj);
706 
707     if (value) {
708         tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
709     } else {
710         tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
711     }
712 }
713 
714 static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
715 {
716     TdxGuest *tdx = TDX_GUEST(obj);
717 
718     return g_strdup(tdx->mrconfigid);
719 }
720 
721 static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
722 {
723     TdxGuest *tdx = TDX_GUEST(obj);
724 
725     g_free(tdx->mrconfigid);
726     tdx->mrconfigid = g_strdup(value);
727 }
728 
729 static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
730 {
731     TdxGuest *tdx = TDX_GUEST(obj);
732 
733     return g_strdup(tdx->mrowner);
734 }
735 
736 static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
737 {
738     TdxGuest *tdx = TDX_GUEST(obj);
739 
740     g_free(tdx->mrowner);
741     tdx->mrowner = g_strdup(value);
742 }
743 
744 static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
745 {
746     TdxGuest *tdx = TDX_GUEST(obj);
747 
748     return g_strdup(tdx->mrownerconfig);
749 }
750 
751 static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
752 {
753     TdxGuest *tdx = TDX_GUEST(obj);
754 
755     g_free(tdx->mrownerconfig);
756     tdx->mrownerconfig = g_strdup(value);
757 }
758 
/* tdx guest */
/* QOM boilerplate: registers the tdx-guest type, user-creatable via -object. */
OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
                                   tdx_guest,
                                   TDX_GUEST,
                                   X86_CONFIDENTIAL_GUEST,
                                   { TYPE_USER_CREATABLE },
                                   { NULL })
766 
/* Instance init: set TDX defaults and register the QOM properties. */
static void tdx_guest_init(Object *obj)
{
    ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
    TdxGuest *tdx = TDX_GUEST(obj);

    /* Serializes the one-shot init in tdx_pre_create_vcpu(). */
    qemu_mutex_init(&tdx->lock);

    /* TDX private memory must be backed by guest_memfd. */
    cgs->require_guest_memfd = true;
    /* SEPT_VE_DISABLE defaults on; see the "sept-ve-disable" property. */
    tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;

    object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_bool(obj, "sept-ve-disable",
                             tdx_guest_get_sept_ve_disable,
                             tdx_guest_set_sept_ve_disable);
    object_property_add_str(obj, "mrconfigid",
                            tdx_guest_get_mrconfigid,
                            tdx_guest_set_mrconfigid);
    object_property_add_str(obj, "mrowner",
                            tdx_guest_get_mrowner, tdx_guest_set_mrowner);
    object_property_add_str(obj, "mrownerconfig",
                            tdx_guest_get_mrownerconfig,
                            tdx_guest_set_mrownerconfig);
}
791 
/* Instance finalize: nothing to release here currently. */
static void tdx_guest_finalize(Object *obj)
{
}
795 
796 static void tdx_guest_class_init(ObjectClass *oc, const void *data)
797 {
798     ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
799     X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
800 
801     klass->kvm_init = tdx_kvm_init;
802     x86_klass->kvm_type = tdx_kvm_type;
803     x86_klass->cpu_instance_init = tdx_cpu_instance_init;
804 }
805