1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2022 Intel Corporation */
14 #include <asm/ia32.h>
16 #include <asm/insn-eval.h>
31 #define VE_IS_IO_IN(e) ((e) & BIT(3))
37 #define TDCALL_RETURN_CODE(a) ((a) >> 32)
79 /* Read TD-scoped metadata */
93 /* Write TD-scoped metadata */
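
The two accessors flagged above wrap the TDG.VM.RD/TDG.VM.WR TDCALL leaves. A minimal sketch of the write side, assuming the __tdcall() wrapper, struct tdx_module_args, and a TDG_VM_WR leaf constant available elsewhere in this file; register assignments follow the TDX module ABI (field ID in RDX, value in R8, write mask in R9):

static void tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,	/* TD-scoped metadata field identifier */
		.r8  = value,	/* new contents for the field */
		.r9  = mask,	/* which bits of the field to update */
	};

	/* TDG.VM.WR: write a TD-scoped metadata field */
	__tdcall(TDG_VM_WR, &args);
}

This shape matches the three-argument call site visible further down, tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL).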
106 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
108 * @reportdata: Address of the input buffer which contains user-defined
116 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
131 return -EINVAL; in tdx_mcall_get_report0()
132 return -EIO; in tdx_mcall_get_report0()
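
The -EINVAL/-EIO pair above follows the usual TDCALL error-mapping pattern: the status class lives in the upper 32 bits of the return value, which is exactly what the TDCALL_RETURN_CODE() macro shown earlier extracts. A hedged sketch of that pattern, assuming a TDCALL_INVALID_OPERAND status constant and the TDG_MR_REPORT leaf:

	u64 ret = __tdcall(TDG_MR_REPORT, &args);

	if (ret) {
		/* Upper 32 bits of the TDCALL result carry the status class */
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;	/* caller passed bad operands */
		return -EIO;		/* anything else is an opaque failure */
	}
	return 0;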
140 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
145 * @size: size of the tdquote buffer (4KB-aligned).
155 /* Since buf is shared memory, set the shared (decrypted) bits */ in tdx_hcall_get_quote()
197 * that no #VE will be delivered for accesses to TD-private memory.
250 * TDX 1.0 generates a #VE when accessing topology-related CPUID leaves (0xB and
252 * In practice, this means that the kernel can only boot with a plain topology.
256 * Enabling the feature eliminates topology-related #VEs: the TDX module
265 /* Has the VMM provided a valid topology configuration? */ in enable_cpu_topology_enumeration()
299 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL in tdx_setup()
305 * The highest bit of a guest physical address is the "sharing" bit. in tdx_setup()
312 *cc_mask = BIT_ULL(gpa_width - 1); in tdx_setup()
317 tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL); in tdx_setup()
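
To make the sharing-bit arithmetic concrete: if the TDX module reports a GPA width of 52 (an illustrative value, not taken from this file), cc_mask selects bit 51, and any guest physical address with that bit set refers to shared (VMM-visible) memory. A sketch:

	/* Example: gpa_width == 52 (hypothetical value for illustration) */
	u64 cc_mask = BIT_ULL(52 - 1);		/* bit 51: the "sharing" bit */

	/* A GPA refers to shared memory iff the sharing bit is set */
	bool is_shared_gpa = !!(gpa & cc_mask);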
325 * The TDX module spec states that #VE may be injected for a limited set of
328 * - Emulation of the architectural #VE injection on EPT violation;
330 * - As a result of guest TD execution of a disallowed instruction,
331 * a disallowed MSR access, or CPUID virtualization;
333 * - A notification to the guest TD about anomalous behavior;
335 * The last one is opt-in and is not used by the kernel.
347 switch (ve->exit_reason) { in ve_instr_len()
353 /* It is safe to use ve->instr_len for #VE due to instructions */ in ve_instr_len()
354 return ve->instr_len; in ve_instr_len()
357 * For EPT violations, ve->instr_len is not defined. For those, in ve_instr_len()
361 WARN_ONCE(1, "ve->instr_len is not defined for EPT violations"); in ve_instr_len()
364 WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason); in ve_instr_len()
365 return ve->instr_len; in ve_instr_len()
379 * can be found in TDX Guest-Host-Communication Interface in __halt()
397 * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a in handle_halt()
402 return -EIO; in handle_halt()
405 return -EIO; in handle_halt()
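
The handle_halt() fragments above sit on top of a TDVMCALL that hands the guest's interrupt-enable state to the VMM. A minimal sketch, assuming the __tdx_hypercall() wrapper and the hcall_func() helper used elsewhere in this file:

static int __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,	/* lets the VMM decide wakeup policy */
	};

	/* Emulate HLT via TDG.VP.VMCALL<Instruction.HLT> */
	return __tdx_hypercall(&args);
}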
436 .r12 = regs->cx, in read_msr()
441 * can be found in TDX Guest-Host-Communication Interface in read_msr()
445 return -EIO; in read_msr()
447 regs->ax = lower_32_bits(args.r11); in read_msr()
448 regs->dx = upper_32_bits(args.r11); in read_msr()
457 .r12 = regs->cx, in write_msr()
458 .r13 = (u64)regs->dx << 32 | regs->ax, in write_msr()
463 * can be found in TDX Guest-Host-Communication Interface in write_msr()
467 return -EIO; in write_msr()
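
Both MSR paths share one shape: the MSR index goes to the VMM in R12, and for WRMSR the 64-bit payload is reassembled from EDX:EAX into R13 as shown above. A sketch of the read side, assuming __tdx_hypercall() copies output registers back into args (the value returns in R11 per the GHCI):

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,	/* MSR index, same register as native RDMSR */
	};

	if (__tdx_hypercall(&args))
		return -EIO;

	/* Split the 64-bit result in R11 into EDX:EAX, as RDMSR would */
	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);

	/* Tell the caller how far to advance RIP past the emulated RDMSR */
	return ve_instr_len(ve);
}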
477 .r12 = regs->ax, in handle_cpuid()
478 .r13 = regs->cx, in handle_cpuid()
485 * Return all-zeros for any CPUID outside the range. It matches CPU in handle_cpuid()
486 * behaviour for a non-supported leaf. in handle_cpuid()
488 if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) { in handle_cpuid()
489 regs->ax = regs->bx = regs->cx = regs->dx = 0; in handle_cpuid()
494 * Emulate the CPUID instruction via a hypercall. More info about in handle_cpuid()
495 * ABI can be found in TDX Guest-Host-Communication Interface in handle_cpuid()
499 return -EIO; in handle_cpuid()
502 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of in handle_cpuid()
506 regs->ax = args.r12; in handle_cpuid()
507 regs->bx = args.r13; in handle_cpuid()
508 regs->cx = args.r14; in handle_cpuid()
509 regs->dx = args.r15; in handle_cpuid()
546 /* Only in-kernel MMIO is supported */ in handle_mmio()
548 return -EFAULT; in handle_mmio()
550 if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE)) in handle_mmio()
551 return -EFAULT; in handle_mmio()
554 return -EINVAL; in handle_mmio()
558 return -EINVAL; in handle_mmio()
563 return -EINVAL; in handle_mmio()
566 if (!fault_in_kernel_space(ve->gla)) { in handle_mmio()
568 return -EINVAL; in handle_mmio()
576 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page. in handle_mmio()
581 if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE) in handle_mmio()
582 return -EFAULT; in handle_mmio()
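
The page-boundary check above refuses any access whose first and last bytes land on different pages. A worked example (editor's numbers, not from the file):

	/* Hypothetical: a 4-byte MMIO access starting 2 bytes before a page end */
	unsigned long vaddr = 0x1ffe;
	int size = 4;

	/* 0x1ffe / 0x1000 == 1 but 0x2001 / 0x1000 == 2: reject with -EFAULT */
	bool crosses = vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE;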
588 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
589 return -EIO; in handle_mmio()
593 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
594 return -EIO; in handle_mmio()
608 return -EINVAL; in handle_mmio()
611 return -EINVAL; in handle_mmio()
615 if (!mmio_read(size, ve->gpa, &val)) in handle_mmio()
616 return -EIO; in handle_mmio()
620 /* Zero-extend for 32-bit operation */ in handle_mmio()
638 return -EINVAL; in handle_mmio()
661 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_in()
667 regs->ax &= ~mask; in handle_in()
669 regs->ax |= args.r11 & mask; in handle_in()
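
The mask dance above preserves the bytes of RAX that a narrow IN must not clobber. A sketch of the whole helper under the same assumptions as before (read value returned in R11; the mask expression here uses GENMASK_ULL and may differ from the file's own):

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,		/* 1, 2 or 4 bytes */
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK_ULL(BITS_PER_BYTE * size - 1, 0);
	bool success = !__tdx_hypercall(&args);

	/* Clear only the bytes the emulated IN writes, then fill them in */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}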
680 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_out()
684 PORT_WRITE, port, regs->ax & mask); in handle_out()
697 u32 exit_qual = ve->exit_qual; in handle_io()
702 return -EIO; in handle_io()
714 return -EIO; in handle_io()
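
handle_io() first unpacks the exit qualification; VE_IS_IO_IN() from the top of the file is one of the accessors involved. A sketch of the decode step, with VE_GET_IO_SIZE() and VE_GET_PORT_NUM() assumed to be sibling macros over the same qualification word:

	u32 exit_qual = ve->exit_qual;
	int size = VE_GET_IO_SIZE(exit_qual);	/* operand width in bytes */
	int port = VE_GET_PORT_NUM(exit_qual);	/* I/O port number */
	bool in  = VE_IS_IO_IN(exit_qual);	/* BIT(3): IN vs OUT */
	bool ok  = in ? handle_in(regs, size, port)
		      : handle_out(regs, size, port);

	return ok ? ve_instr_len(ve) : -EIO;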
720 * Early #VE exception handler. Only handles a subset of port I/O.
737 regs->ip += insn_len; in tdx_early_handle_ve()
749 * This has to be called early in #VE handling. A "nested" #VE which in tdx_get_ve_info()
750 * occurs before this will raise a #DF and is not recoverable. in tdx_get_ve_info()
758 * valid flag is set. It means that NMI=>#VE will not result in a #DF. in tdx_get_ve_info()
763 ve->exit_reason = args.rcx; in tdx_get_ve_info()
764 ve->exit_qual = args.rdx; in tdx_get_ve_info()
765 ve->gla = args.r8; in tdx_get_ve_info()
766 ve->gpa = args.r9; in tdx_get_ve_info()
767 ve->instr_len = lower_32_bits(args.r10); in tdx_get_ve_info()
768 ve->instr_info = upper_32_bits(args.r10); in tdx_get_ve_info()
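
The field extraction above follows a single TDCALL that both fetches the #VE details and re-arms delivery by clearing the "valid" flag mentioned earlier. A sketch, assuming a tdcall() helper and the TDG_VP_VEINFO_GET leaf:

	struct tdx_module_args args = {};

	/* Fetch #VE info; this also frees the info slot for the next #VE */
	tdcall(TDG_VP_VEINFO_GET, &args);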
775 * or -errno on error.
779 switch (ve->exit_reason) { in virt_exception_user()
783 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_user()
784 return -EIO; in virt_exception_user()
797 * or -errno on error.
801 switch (ve->exit_reason) { in virt_exception_kernel()
811 if (is_private_gpa(ve->gpa)) in virt_exception_kernel()
812 panic("Unexpected EPT-violation on private memory."); in virt_exception_kernel()
817 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_kernel()
818 return -EIO; in virt_exception_kernel()
834 regs->ip += insn_len; in tdx_handle_virt_exception()
842 * TDX guest is responsible for flushing TLB on private->shared in tdx_tlb_flush_required()
843 * transition. VMM is responsible for flushing on shared->private. in tdx_tlb_flush_required()
851 * flush to avoid integrity failures in the face of a buggy or in tdx_tlb_flush_required()
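
The division of labor described above collapses into a one-line policy. A sketch, assuming the callback receives the target state of the conversion:

static bool tdx_tlb_flush_required(bool private)
{
	/* Guest flushes on private->shared; the VMM covers shared->private */
	return !private;
}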
870 * can be found in TDX Guest-Host-Communication Interface (GHCI),
875 /* Retrying the hypercall a second time should succeed; use 3 just in case */ in tdx_map_gpa()
876 const int max_retries_per_page = 3; in tdx_map_gpa()
890 .r13 = end - start }; in tdx_map_gpa()
906 /* "Consume" a retry without forward progress */ in tdx_map_gpa()
932 /* shared->private conversion requires memory to be accepted before use */ in tdx_enc_status_changed()
943 * Only handle shared->private conversion here. in tdx_enc_status_change_prepare()
947 return -EIO; in tdx_enc_status_change_prepare()
956 * Only handle private->shared conversion here. in tdx_enc_status_change_finish()
960 return -EIO; in tdx_enc_status_change_finish()
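
The prepare/finish split above brackets the actual conversion. A sketch of the central helper matching the earlier comment about accepting memory after a shared->private flip (tdx_accept_memory() assumed available):

static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa((void *)vaddr);
	phys_addr_t end = start + numpages * PAGE_SIZE;

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private: the guest must accept pages before touching them */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}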
970 /* Stop new private<->shared conversions */
983 pr_warn("Failed to stop shared<->private conversions\n"); in tdx_kexec_begin()
1026 * *next* kernel through a private mapping will result in tdx_kexec_finish()
1030 * a pre-reserved memory range that is always private. in tdx_kexec_finish()
1032 * a crash if it accesses unconverted memory through in tdx_kexec_finish()
1033 * a private mapping which is possible when accessing in tdx_kexec_finish()
1040 pr_err("Failed to unshare range %#lx-%#lx\n", in tdx_kexec_finish()
1076 u32 eax, sig[3]; in tdx_early_init()
1101 physical_mask &= cc_mask - 1; in tdx_early_init()
1109 * - Private mapping => Shared Page == Guest shutdown in tdx_early_init()
1110 * - Shared mapping => Private Page == Recoverable #VE in tdx_early_init()
1118 * In both cases there is a temporary shared mapping to a private page, in tdx_early_init()
1119 * which can result in a #VE. But, there is never a private mapping to in tdx_early_init()
1120 * a shared page. in tdx_early_init()
1132 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that in tdx_early_init()
1134 * in STI-shadow, possibly resulting in missed wakeup events. in tdx_early_init()
1139 * in addition to having a reliable halt logic execution. in tdx_early_init()
1149 * Intel-TDX has a secure RDMSR hypercall, but that needs to be in tdx_early_init()