// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
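
/*
 * Illustrative decode (not referenced by the code below): a one-byte OUT to
 * the 0x3f8 serial port reports exit_qual = (0x3f8 << 16), so
 * VE_GET_IO_SIZE() is 1, VE_IS_IO_IN() and VE_IS_IO_STRING() are false and
 * VE_GET_PORT_NUM() is 0x3f8.
 */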

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

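/*
 * Number of PAGE_SIZE pages currently shared with the VMM. Updated on
 * conversions in tdx_enc_status_change_finish() and cross-checked against
 * the direct-map walk in tdx_kexec_finish().
 */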
static atomic_long_t nr_shared;

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/* Read TD-scoped metadata */
static inline u64 tdg_vm_rd(u64 field, u64 *value)
{
	struct tdx_module_args args = {
		.rdx = field,
	};
	u64 ret;

	ret = __tdcall_ret(TDG_VM_RD, &args);
	*value = args.r8;

	return ret;
}

/* Write TD-scoped metadata */
static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,
		.r8 = value,
		.r9 = mask,
	};

	return __tdcall(TDG_VM_WR, &args);
}
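
/*
 * Note: for TDG.VM.WR the R9 argument acts as a write mask, so a caller can
 * update a single bit of a TDCS field without touching the rest, e.g. the
 * TDCS_TD_CTLS writes below pass the same bit as both value and mask.
 */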

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
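
/*
 * A minimal usage sketch (hypothetical caller, not part of this file). Buffer
 * sizes follow the TDX module v1.0 spec: 64-byte REPORTDATA and 1024-byte
 * TDREPORT, both naturally aligned by the power-of-two kmalloc() sizes:
 *
 *	u8 *reportdata = kzalloc(64, GFP_KERNEL);
 *	u8 *tdreport   = kzalloc(1024, GFP_KERNEL);
 *	int err = -ENOMEM;
 *
 *	if (reportdata && tdreport)
 *		err = tdx_mcall_get_report0(reportdata, tdreport);
 */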

/**
 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
 *                         hypercall.
 * @buf: Address of the directly mapped shared kernel buffer which
 *       contains TDREPORT. The same buffer will be used by VMM to
 *       store the generated TD Quote output.
 * @size: size of the tdquote buffer (4KB-aligned).
 *
 * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
 * v1.0 specification for more information on GetQuote hypercall.
 * It is used in the TDX guest driver module to get the TD Quote.
 *
 * Return 0 on success or error code on failure.
 */
u64 tdx_hcall_get_quote(u8 *buf, size_t size)
{
	/* Since buf is a shared memory, set the shared (decrypted) bits */
	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
}
EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
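
/*
 * Note: the buffer handed to tdx_hcall_get_quote() must already be mapped
 * shared (e.g. converted by the caller with set_memory_decrypted()); the
 * GetQuote output is written by the VMM, which cannot access private guest
 * memory.
 */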

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char bytes[64] __nonstring;
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.bytes, msg, '\0');

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

/*
 * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
 * that no #VE will be delivered for accesses to TD-private memory.
 *
 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
 * controls if the guest will receive such #VE with TD attribute
 * TDX_ATTR_SEPT_VE_DISABLE.
 *
 * Newer TDX modules allow the guest to control if it wants to receive SEPT
 * violation #VEs.
 *
 * Check if the feature is available and disable SEPT #VE if possible.
 *
 * If the TD is allowed to disable/enable SEPT #VEs, the TDX_ATTR_SEPT_VE_DISABLE
 * attribute is no longer reliable. It reflects the initial state of the
 * control for the TD, but it will not be updated if someone (e.g. bootloader)
 * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
 * determine if SEPT #VEs are enabled or disabled.
 */
static void disable_sept_ve(u64 td_attr)
{
	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
	bool debug = td_attr & TDX_ATTR_DEBUG;
	u64 config, controls;

	/* Is this TD allowed to disable SEPT #VE */
	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
		/* No SEPT #VE controls for the guest: check the attribute */
		if (td_attr & TDX_ATTR_SEPT_VE_DISABLE)
			return;

		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
		if (debug)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
		return;
	}

	/* Check if SEPT #VE has been disabled before us */
	tdg_vm_rd(TDCS_TD_CTLS, &controls);
	if (controls & TD_CTLS_PENDING_VE_DISABLE)
		return;

	/* Keep #VEs enabled for splats in debugging environments */
	if (debug)
		return;

	/* Disable SEPT #VEs */
	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
		  TD_CTLS_PENDING_VE_DISABLE);
}

/*
 * TDX 1.0 generates a #VE when accessing topology-related CPUID leafs (0xB and
 * 0x1F) and the X2APIC_APICID MSR. The kernel returns all zeros on CPUID #VEs.
 * In practice, this means that the kernel can only boot with a plain topology.
 * Any complications will cause problems.
 *
 * The ENUM_TOPOLOGY feature allows the VMM to provide topology information.
 * Enabling the feature eliminates topology-related #VEs: the TDX module
 * virtualizes accesses to the CPUID leafs and the MSR.
 *
 * Enable ENUM_TOPOLOGY if it is available.
 */
static void enable_cpu_topology_enumeration(void)
{
	u64 configured;

	/* Has the VMM provided a valid topology configuration? */
	tdg_vm_rd(TDCS_TOPOLOGY_ENUM_CONFIGURED, &configured);
	if (!configured) {
		pr_err("VMM did not configure X2APIC_IDs properly\n");
		return;
	}

	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY);
}

static void reduce_unnecessary_ve(void)
{
	u64 err = tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_REDUCE_VE, TD_CTLS_REDUCE_VE);

	if (err == TDX_SUCCESS)
		return;

	/*
	 * Enabling REDUCE_VE includes ENUM_TOPOLOGY. Only try to
	 * enable ENUM_TOPOLOGY if REDUCE_VE was not successful.
	 */
	enable_cpu_topology_enumeration();
}

static void tdx_setup(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
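
	/*
	 * For example, with a GPA width of 52 the shared bit is bit 51,
	 * i.e. cc_mask == BIT_ULL(51), and GPAs with that bit set are shared.
	 */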

	td_attr = args.rdx;

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);

	disable_sept_ve(td_attr);

	reduce_unnecessary_ve();
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VEs due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	/*
	 * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a
	 * wake event may be consumed before requesting HLT emulation, leaving
	 * the vCPU blocking indefinitely.
	 */
	if (WARN_ONCE(!irq_disabled, "HLT emulation with IRQs enabled"))
		return -EIO;

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static void __cpuidle tdx_safe_halt(void)
{
	tdx_halt();
	/*
	 * "__cpuidle" section doesn't support instrumentation, so stick
	 * with raw_* variant that avoids tracing hooks.
	 */
	raw_local_irq_enable();
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

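/*
 * handle_mmio() decodes the faulting instruction and forwards the access to
 * the VMM. For example (illustrative), a 4-byte store such as
 * "mov %eax, (%rdx)" to an ioremapped address decodes as INSN_MMIO_WRITE
 * with size 4 and is emulated via mmio_write(); the decoded instruction
 * length is returned so the caller can advance RIP past the MMIO access.
 */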
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the length of the handled instruction on success or -EIO on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. If it fails, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual = args.rdx;
	ve->gla = args.r8;
	ve->gpa = args.r9;
	ve->instr_len = lower_32_bits(args.r10);
	ve->instr_info = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

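/*
 * For TDX, cc_mkenc() clears the shared bit, so a GPA is private iff its
 * shared bit is zero, e.g. with a 52-bit GPA width an EPT-violation GPA
 * with bit 51 set refers to a shared (MMIO) mapping.
 */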
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;
		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
		retry_count = 0;
	}

	return false;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	return 0;
}

static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	if (enc)
		atomic_long_sub(numpages, &nr_shared);
	else
		atomic_long_add(numpages, &nr_shared);

	return 0;
}

/* Stop new private<->shared conversions */
static void tdx_kexec_begin(void)
{
	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/*
	 * Crash kernel reaches here with interrupts disabled: can't wait for
	 * conversions to finish.
	 *
	 * If a race happened, just report and proceed.
	 */
	if (!set_memory_enc_stop_conversion())
		pr_warn("Failed to stop shared<->private conversions\n");
}

/* Walk direct mapping and convert all shared memory back to private */
static void tdx_kexec_finish(void)
{
	unsigned long addr, end;
	long found = 0, shared;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	lockdep_assert_irqs_disabled();

	addr = PAGE_OFFSET;
	end = PAGE_OFFSET + get_max_mapped();

	while (addr < end) {
		unsigned long size;
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		size = page_level_size(level);

		if (pte && pte_decrypted(*pte)) {
			int pages = size / PAGE_SIZE;

			/*
			 * Touching memory with shared bit set triggers implicit
			 * conversion to shared.
			 *
			 * Make sure nobody touches the shared range from
			 * now on.
			 */
			set_pte(pte, __pte(0));

			/*
			 * Memory encryption state persists across kexec.
			 * If tdx_enc_status_changed() fails in the first
			 * kernel, it leaves memory in an unknown state.
			 *
			 * If that memory remains shared, accessing it in the
			 * *next* kernel through a private mapping will result
			 * in an unrecoverable guest shutdown.
			 *
			 * The kdump kernel boot is not impacted as it uses
			 * a pre-reserved memory range that is always private.
			 * However, gathering crash information could lead to
			 * a crash if it accesses unconverted memory through
			 * a private mapping which is possible when accessing
			 * that memory through /proc/vmcore, for example.
			 *
			 * In all cases, print error info in order to leave
			 * enough bread crumbs for debugging.
			 */
			if (!tdx_enc_status_changed(addr, pages, true)) {
				pr_err("Failed to unshare range %#lx-%#lx\n",
				       addr, addr + size);
			}

			found += pages;
		}

		addr += size;
	}

	__flush_tlb_all();

	shared = atomic_long_read(&nr_shared);
	if (shared != found) {
		pr_err("shared page accounting is off\n");
		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
	}
}

static __init void tdx_announce(void)
{
	struct tdx_module_args args = {};
	u64 controls;

	pr_info("Guest detected\n");

	tdcall(TDG_VP_INFO, &args);
	tdx_dump_attributes(args.rdx);

	tdg_vm_rd(TDCS_TD_CTLS, &controls);
	tdx_dump_td_ctls(controls);
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;

	/* Configure the TD */
	tdx_setup(&cc_mask);

	cc_set_mask(cc_mask);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
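
	/*
	 * E.g. with cc_mask == BIT_ULL(51), physical_mask is trimmed to bits
	 * 0-50 so the shared bit is never interpreted as a physical address
	 * bit.
	 */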

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	x86_platform.guest.enc_kexec_begin = tdx_kexec_begin;
	x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;

	/*
	 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
	 * will enable interrupts before HLT TDCALL invocation if executed
	 * in STI-shadow, possibly resulting in missed wakeup events.
	 *
	 * Modify all possible HLT execution paths to use TDX specific routines
	 * that directly execute TDCALL and toggle the interrupt state as
	 * needed after TDCALL completion. This also reduces HLT related #VEs
	 * in addition to having a reliable halt logic execution.
	 */
	pv_ops.irq.safe_halt = tdx_safe_halt;
	pv_ops.irq.halt = tdx_halt;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	tdx_announce();
}
1157