/*
 * QEMU TDX support
 *
 * Copyright (c) 2025 Intel Corporation
 *
 * Author:
 *      Xiaoyao Li <xiaoyao.li@intel.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/base64.h"
#include "qemu/mmap-alloc.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "crypto/hash.h"
#include "system/system.h"
#include "system/ramblock.h"

#include <linux/kvm_para.h>

#include "hw/i386/e820_memory_layout.h"
#include "hw/i386/tdvf.h"
#include "hw/i386/x86.h"
#include "hw/i386/tdvf-hob.h"
#include "kvm_i386.h"
#include "tdx.h"

#define TDX_MIN_TSC_FREQUENCY_KHZ   (100 * 1000)
#define TDX_MAX_TSC_FREQUENCY_KHZ   (10 * 1000 * 1000)

#define TDX_TD_ATTRIBUTES_DEBUG             BIT_ULL(0)
#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE   BIT_ULL(28)
#define TDX_TD_ATTRIBUTES_PKS               BIT_ULL(30)
#define TDX_TD_ATTRIBUTES_PERFMON           BIT_ULL(63)

#define TDX_SUPPORTED_TD_ATTRS  (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE | \
                                 TDX_TD_ATTRIBUTES_PKS | \
                                 TDX_TD_ATTRIBUTES_PERFMON)

static TdxGuest *tdx_guest;

static struct kvm_tdx_capabilities *tdx_caps;

/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
bool is_tdx_vm(void)
{
    return !!tdx_guest;
}

enum tdx_ioctl_level {
    TDX_VM_IOCTL,
    TDX_VCPU_IOCTL,
};

static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
                              int cmd_id, __u32 flags, void *data,
                              Error **errp)
{
    struct kvm_tdx_cmd tdx_cmd = {};
    int r;

    const char *tdx_ioctl_name[] = {
        [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
        [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
        [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
        [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
        [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
        [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
    };

    tdx_cmd.id = cmd_id;
    tdx_cmd.flags = flags;
    tdx_cmd.data = (__u64)(unsigned long)data;

    switch (level) {
    case TDX_VM_IOCTL:
        r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
        break;
    case TDX_VCPU_IOCTL:
        r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
        break;
    default:
        error_setg(errp, "Invalid tdx_ioctl_level %d", level);
        return -EINVAL;
    }

    if (r < 0) {
        error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
                         tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
    }
    return r;
}

static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
                               Error **errp)
{
    return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
}

static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
                                 void *data, Error **errp)
{
    return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
}
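
/*
 * Probe KVM for the TDX capabilities advertised by the TDX module.
 *
 * The number of configurable CPUID entries is not known up front: start
 * from the 6 entries reported by the first generation of TDX and, when
 * KVM returns -E2BIG because the buffer is too small, double the buffer
 * and retry until the call succeeds (or the KVM_MAX_CPUID_ENTRIES bound
 * is exceeded, which would indicate a broken KVM).
 */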
static int get_tdx_capabilities(Error **errp)
{
    struct kvm_tdx_capabilities *caps;
    /* 1st generation of TDX reports 6 cpuid configs */
    int nr_cpuid_configs = 6;
    size_t size;
    int r;

    do {
        Error *local_err = NULL;
        size = sizeof(struct kvm_tdx_capabilities) +
               nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
        caps = g_malloc0(size);
        caps->cpuid.nent = nr_cpuid_configs;

        r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
        if (r == -E2BIG) {
            g_free(caps);
            nr_cpuid_configs *= 2;
            if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
                error_report("KVM TDX seems broken: number of CPUID entries"
                             " in kvm_tdx_capabilities exceeds limit: %d",
                             KVM_MAX_CPUID_ENTRIES);
                error_propagate(errp, local_err);
                return r;
            }
            error_free(local_err);
        } else if (r < 0) {
            g_free(caps);
            error_propagate(errp, local_err);
            return r;
        }
    } while (r == -E2BIG);

    tdx_caps = caps;

    return 0;
}

void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
{
    assert(!tdx_guest->tdvf_mr);
    tdx_guest->tdvf_mr = tdvf_mr;
}

static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
{
    TdxFirmwareEntry *entry;

    for_each_tdx_fw_entry(&tdx->tdvf, entry) {
        if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
            return entry;
        }
    }
    error_report("TDVF metadata doesn't specify TD_HOB location.");
    exit(1);
}

static void tdx_add_ram_entry(uint64_t address, uint64_t length,
                              enum TdxRamType type)
{
    uint32_t nr_entries = tdx_guest->nr_ram_entries;
    tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
                                     nr_entries + 1);

    tdx_guest->ram_entries[nr_entries].address = address;
    tdx_guest->ram_entries[nr_entries].length = length;
    tdx_guest->ram_entries[nr_entries].type = type;
    tdx_guest->nr_ram_entries++;
}
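
/*
 * Mark the [address, address + length) range as TDX_RAM_ADDED, i.e. memory
 * that is initialized (and possibly measured) via KVM_TDX_INIT_MEM_REGION
 * rather than accepted by the guest at runtime.
 *
 * The range must fall entirely inside one existing RAM entry.  When it
 * covers only part of that entry, the entry is shrunk to the accepted
 * range and the leftover head and/or tail is re-added as
 * TDX_RAM_UNACCEPTED, e.g.:
 *
 *   before:  |--------------- TDX_RAM_UNACCEPTED ----------------|
 *   after:   | UNACCEPTED |------ TDX_RAM_ADDED ------| UNACCEPTED |
 *
 * Returns 0 on success, -1 if the range doesn't fit in a single entry.
 */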
static int tdx_accept_ram_range(uint64_t address, uint64_t length)
{
    uint64_t head_start, tail_start, head_length, tail_length;
    uint64_t tmp_address, tmp_length;
    TdxRamEntry *e;
    int i = 0;

    do {
        if (i == tdx_guest->nr_ram_entries) {
            return -1;
        }

        e = &tdx_guest->ram_entries[i++];
    } while (address + length <= e->address ||
             address >= e->address + e->length);

    /*
     * The to-be-accepted ram range must be fully contained by one
     * RAM entry.
     */
    if (e->address > address ||
        e->address + e->length < address + length) {
        return -1;
    }

    if (e->type == TDX_RAM_ADDED) {
        return 0;
    }

    tmp_address = e->address;
    tmp_length = e->length;

    e->address = address;
    e->length = length;
    e->type = TDX_RAM_ADDED;

    head_length = address - tmp_address;
    if (head_length > 0) {
        head_start = tmp_address;
        tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
    }

    tail_start = address + length;
    if (tail_start < tmp_address + tmp_length) {
        tail_length = tmp_address + tmp_length - tail_start;
        tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
    }

    return 0;
}

static int tdx_ram_entry_compare(const void *lhs_, const void *rhs_)
{
    const TdxRamEntry *lhs = lhs_;
    const TdxRamEntry *rhs = rhs_;

    if (lhs->address == rhs->address) {
        return 0;
    }
    if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
        return 1;
    }
    return -1;
}

static void tdx_init_ram_entries(void)
{
    unsigned i, j, nr_e820_entries;

    nr_e820_entries = e820_get_table(NULL);
    tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);

    for (i = 0, j = 0; i < nr_e820_entries; i++) {
        uint64_t addr, len;

        if (e820_get_entry(i, E820_RAM, &addr, &len)) {
            tdx_guest->ram_entries[j].address = addr;
            tdx_guest->ram_entries[j].length = len;
            tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
            j++;
        }
    }
    tdx_guest->nr_ram_entries = j;
}

static void tdx_post_init_vcpus(void)
{
    TdxFirmwareEntry *hob;
    CPUState *cpu;

    hob = tdx_get_hob_entry(tdx_guest);
    CPU_FOREACH(cpu) {
        tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)hob->address,
                       &error_fatal);
    }
}
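
/*
 * Machine-init-done notifier: runs once the machine (and thus the TDVF
 * image and e820 map) is fully set up.  It builds the TD HOB describing
 * guest RAM, copies the TDVF sections into TD private memory via
 * KVM_TDX_INIT_MEM_REGION (measuring those that request MR.EXTEND), and
 * finally seals the measurement with KVM_TDX_FINALIZE_VM.
 */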
static void tdx_finalize_vm(Notifier *notifier, void *unused)
{
    TdxFirmware *tdvf = &tdx_guest->tdvf;
    TdxFirmwareEntry *entry;
    RAMBlock *ram_block;
    Error *local_err = NULL;
    int r;

    tdx_init_ram_entries();

    for_each_tdx_fw_entry(tdvf, entry) {
        switch (entry->type) {
        case TDVF_SECTION_TYPE_BFV:
        case TDVF_SECTION_TYPE_CFV:
            entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
            break;
        case TDVF_SECTION_TYPE_TD_HOB:
        case TDVF_SECTION_TYPE_TEMP_MEM:
            entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
                                           qemu_real_host_page_size(), 0, 0);
            if (entry->mem_ptr == MAP_FAILED) {
                error_report("Failed to mmap memory for TDVF section %d",
                             entry->type);
                exit(1);
            }
            if (tdx_accept_ram_range(entry->address, entry->size)) {
                error_report("Failed to accept memory for TDVF section %d",
                             entry->type);
                qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
                exit(1);
            }
            break;
        default:
            error_report("Unsupported TDVF section %d", entry->type);
            exit(1);
        }
    }

    qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
          sizeof(TdxRamEntry), &tdx_ram_entry_compare);

    tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));

    tdx_post_init_vcpus();

    for_each_tdx_fw_entry(tdvf, entry) {
        struct kvm_tdx_init_mem_region region;
        uint32_t flags;

        region = (struct kvm_tdx_init_mem_region) {
            .source_addr = (uint64_t)entry->mem_ptr,
            .gpa = entry->address,
            .nr_pages = entry->size >> 12,
        };

        flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
                KVM_TDX_MEASURE_MEMORY_REGION : 0;

        do {
            error_free(local_err);
            local_err = NULL;
            r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
                               &region, &local_err);
        } while (r == -EAGAIN || r == -EINTR);
        if (r < 0) {
            error_report_err(local_err);
            exit(1);
        }

        if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
            entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
            qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
            entry->mem_ptr = NULL;
        }
    }

    /*
     * The TDVF image has been copied into private memory above via
     * KVM_TDX_INIT_MEM_REGION, so the shared copy is no longer needed.
     */
    ram_block = tdx_guest->tdvf_mr->ram_block;
    ram_block_discard_range(ram_block, 0, ram_block->max_length);

    tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
    CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
}

static Notifier tdx_machine_done_notify = {
    .notify = tdx_finalize_vm,
};

static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(cgs);
    int r = 0;

    kvm_mark_guest_state_protected();

    if (!tdx_caps) {
        r = get_tdx_capabilities(errp);
        if (r) {
            return r;
        }
    }

    /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
    if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
        return -EOPNOTSUPP;
    }

    qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);

    tdx_guest = tdx;
    return 0;
}

static int tdx_kvm_type(X86ConfidentialGuest *cg)
{
    /* Do the object check */
    TDX_GUEST(cg);

    return KVM_X86_TDX_VM;
}
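
/*
 * Requested TD attributes are validated against two masks: the bits the
 * TDX module supports (tdx_caps->supported_attrs, reported by KVM) and
 * the bits QEMU itself knows how to handle (TDX_SUPPORTED_TD_ATTRS).
 * Rejecting unknown bits up front gives a clear error instead of a
 * failure deep inside TD initialization.
 */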
static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
{
    if (tdx->attributes & ~tdx_caps->supported_attrs) {
        error_setg(errp, "Invalid attributes 0x%" PRIx64 " for TDX VM "
                   "(KVM supported: 0x%llx)", tdx->attributes,
                   tdx_caps->supported_attrs);
        return -1;
    }

    if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
        error_setg(errp, "Requested TD attributes include bits unsupported "
                   "by QEMU: 0x%" PRIx64 " (QEMU supported: 0x%llx)",
                   tdx->attributes, TDX_SUPPORTED_TD_ATTRS);
        return -1;
    }

    return 0;
}

static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
{
    CPUX86State *env = &x86cpu->env;

    tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
                             TDX_TD_ATTRIBUTES_PKS : 0;
    tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;

    return tdx_validate_attributes(tdx_guest, errp);
}

static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
{
    CPUX86State *env = &x86cpu->env;
    uint64_t xfam;

    xfam = env->features[FEAT_XSAVE_XCR0_LO] |
           env->features[FEAT_XSAVE_XCR0_HI] |
           env->features[FEAT_XSAVE_XSS_LO] |
           env->features[FEAT_XSAVE_XSS_HI];

    if (xfam & ~tdx_caps->supported_xfam) {
        error_setg(errp, "Invalid XFAM 0x%" PRIx64 " for TDX VM "
                   "(supported: 0x%llx)", xfam, tdx_caps->supported_xfam);
        return -1;
    }

    tdx_guest->xfam = xfam;
    return 0;
}

static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
{
    int i, dest_cnt = 0;
    struct kvm_cpuid_entry2 *src, *dest, *conf;

    for (i = 0; i < cpuids->nent; i++) {
        src = cpuids->entries + i;
        conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
        if (!conf) {
            continue;
        }
        dest = cpuids->entries + dest_cnt;

        dest->function = src->function;
        dest->index = src->index;
        dest->flags = src->flags;
        dest->eax = src->eax & conf->eax;
        dest->ebx = src->ebx & conf->ebx;
        dest->ecx = src->ecx & conf->ecx;
        dest->edx = src->edx & conf->edx;

        dest_cnt++;
    }
    cpuids->nent = dest_cnt;
}
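
/*
 * Called on the vCPU-creation path for every vCPU, but only the first
 * call does any work: TD-scoped state (APIC bus and TSC frequency,
 * measurement registers, attributes, XFAM and the filtered CPUID set)
 * must be frozen with KVM_TDX_INIT_VM before any vCPU exists, so it is
 * set up once under tdx_guest->lock and skipped afterwards.
 */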
int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
    Error *local_err = NULL;
    size_t data_len;
    int retry = 10000;
    int r = 0;

    QEMU_LOCK_GUARD(&tdx_guest->lock);
    if (tdx_guest->initialized) {
        return r;
    }

    init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
                        sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
        error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
        return -EOPNOTSUPP;
    }

    r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
                          0, TDX_APIC_BUS_CYCLES_NS);
    if (r < 0) {
        error_setg_errno(errp, -r,
                         "Unable to set core crystal clock frequency to 25MHz");
        return r;
    }

    if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
                         env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
        error_setg(errp, "Invalid TSC %" PRId64 " kHz, must specify "
                   "cpu_frequency between [%d, %d] kHz", env->tsc_khz,
                   TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
        return -EINVAL;
    }

    if (env->tsc_khz % (25 * 1000)) {
        error_setg(errp, "Invalid TSC %" PRId64 " kHz, it must be a multiple "
                   "of 25MHz", env->tsc_khz);
        return -EINVAL;
    }

    /* It's safe even if env->tsc_khz is 0; KVM uses the host's tsc_khz then */
    r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
    if (r < 0) {
        error_setg_errno(errp, -r, "Unable to set TSC frequency to %" PRId64
                         " kHz", env->tsc_khz);
        return r;
    }
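
    /*
     * mrconfigid/mrowner/mrownerconfig are SHA-384 values provided by the
     * user as base64 strings; each must decode to exactly 48 bytes
     * (QCRYPTO_HASH_DIGEST_LEN_SHA384) to fill the corresponding
     * measurement register passed to KVM_TDX_INIT_VM.
     */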
    if (tdx_guest->mrconfigid) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
                             strlen(tdx_guest->mrconfigid), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX: failed to decode mrconfigid");
            return -1;
        }
        memcpy(init_vm->mrconfigid, data, data_len);
    }

    if (tdx_guest->mrowner) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
                             strlen(tdx_guest->mrowner), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX: failed to decode mrowner");
            return -1;
        }
        memcpy(init_vm->mrowner, data, data_len);
    }

    if (tdx_guest->mrownerconfig) {
        g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
                             strlen(tdx_guest->mrownerconfig), &data_len, errp);
        if (!data) {
            return -1;
        }
        if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
            error_setg(errp, "TDX: failed to decode mrownerconfig");
            return -1;
        }
        memcpy(init_vm->mrownerconfig, data, data_len);
    }

    r = setup_td_guest_attributes(x86cpu, errp);
    if (r) {
        return r;
    }

    r = setup_td_xfam(x86cpu, errp);
    if (r) {
        return r;
    }

    init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
    tdx_filter_cpuid(&init_vm->cpuid);

    init_vm->attributes = tdx_guest->attributes;
    init_vm->xfam = tdx_guest->xfam;

    /*
     * KVM_TDX_INIT_VM gets -EAGAIN when the KVM-side SEAMCALL(TDH_MNG_CREATE)
     * hits TDX_RND_NO_ENTROPY, i.e. the random number source (e.g., RDRAND
     * or RDSEED) is busy.
     *
     * Retry in that case.
     */
    do {
        error_free(local_err);
        local_err = NULL;
        r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
    } while (r == -EAGAIN && --retry);

    if (r < 0) {
        if (!retry) {
            error_append_hint(&local_err, "The hardware RNG (Random Number "
                              "Generator) is kept busy by other users of "
                              "RDRAND/RDSEED (possibly maliciously), which "
                              "makes KVM_TDX_INIT_VM fail repeatedly due to "
                              "lack of entropy.\n");
        }
        error_propagate(errp, local_err);
        return r;
    }

    tdx_guest->initialized = true;

    return 0;
}

int tdx_parse_tdvf(void *flash_ptr, int size)
{
    return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
}

static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
}

static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    if (value) {
        tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
    } else {
        tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
    }
}

static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrconfigid);
}

static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrconfigid);
    tdx->mrconfigid = g_strdup(value);
}

static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrowner);
}

static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrowner);
    tdx->mrowner = g_strdup(value);
}

static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    return g_strdup(tdx->mrownerconfig);
}

static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
{
    TdxGuest *tdx = TDX_GUEST(obj);

    g_free(tdx->mrownerconfig);
    tdx->mrownerconfig = g_strdup(value);
}

/* tdx guest */
OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
                                   tdx_guest,
                                   TDX_GUEST,
                                   X86_CONFIDENTIAL_GUEST,
                                   { TYPE_USER_CREATABLE },
                                   { NULL })
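
/*
 * A tdx-guest object is created on the command line and attached to the
 * machine as its confidential-guest-support.  An illustrative invocation
 * (exact options depend on the QEMU version and setup; paths are
 * placeholders):
 *
 *   qemu-system-x86_64 -accel kvm \
 *       -object tdx-guest,id=tdx0,sept-ve-disable=on \
 *       -machine q35,kernel-irqchip=split,confidential-guest-support=tdx0 \
 *       -bios /path/to/TDVF.fd
 */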
static void tdx_guest_init(Object *obj)
{
    ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
    TdxGuest *tdx = TDX_GUEST(obj);

    qemu_mutex_init(&tdx->lock);

    cgs->require_guest_memfd = true;
    tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;

    object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
                                   OBJ_PROP_FLAG_READWRITE);
    object_property_add_bool(obj, "sept-ve-disable",
                             tdx_guest_get_sept_ve_disable,
                             tdx_guest_set_sept_ve_disable);
    object_property_add_str(obj, "mrconfigid",
                            tdx_guest_get_mrconfigid,
                            tdx_guest_set_mrconfigid);
    object_property_add_str(obj, "mrowner",
                            tdx_guest_get_mrowner, tdx_guest_set_mrowner);
    object_property_add_str(obj, "mrownerconfig",
                            tdx_guest_get_mrownerconfig,
                            tdx_guest_set_mrownerconfig);
}

static void tdx_guest_finalize(Object *obj)
{
}

static void tdx_guest_class_init(ObjectClass *oc, const void *data)
{
    ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    klass->kvm_init = tdx_kvm_init;
    x86_klass->kvm_type = tdx_kvm_type;
}