xref: /linux/arch/x86/coco/tdx/tdx.c (revision f7f0adfe64de08803990dc4cbecd2849c04e314a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2022 Intel Corporation */
3 
4 #undef pr_fmt
5 #define pr_fmt(fmt)     "tdx: " fmt
6 
7 #include <linux/cpufeature.h>
8 #include <linux/export.h>
9 #include <linux/io.h>
10 #include <linux/kexec.h>
11 #include <asm/coco.h>
12 #include <asm/tdx.h>
13 #include <asm/vmx.h>
14 #include <asm/ia32.h>
15 #include <asm/insn.h>
16 #include <asm/insn-eval.h>
17 #include <asm/pgtable.h>
18 #include <asm/set_memory.h>
19 #include <asm/traps.h>
20 
21 /* MMIO direction */
22 #define EPT_READ	0
23 #define EPT_WRITE	1
24 
25 /* Port I/O direction */
26 #define PORT_READ	0
27 #define PORT_WRITE	1
28 
29 /* See Exit Qualification for I/O Instructions in VMX documentation */
30 #define VE_IS_IO_IN(e)		((e) & BIT(3))
31 #define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
32 #define VE_GET_PORT_NUM(e)	((e) >> 16)
33 #define VE_IS_IO_STRING(e)	((e) & BIT(4))
34 
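/*
 * Worked example (illustrative, hand-built value, not captured from
 * hardware): an exit qualification for a 4-byte IN from port 0x3f8
 * decodes with the helpers above as follows:
 *
 *	u64 e = (0x3f8ULL << 16) | BIT(3) | (4 - 1);
 *
 *	VE_IS_IO_IN(e)		-> true   (bit 3 set: IN, not OUT)
 *	VE_GET_IO_SIZE(e)	-> 4      (bits 2:0 hold size - 1)
 *	VE_GET_PORT_NUM(e)	-> 0x3f8  (port number lives in bits 31:16)
 *	VE_IS_IO_STRING(e)	-> false  (bit 4 clear: not INS/OUTS)
 */
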
35 /* TDX Module call error codes */
36 #define TDCALL_RETURN_CODE(a)	((a) >> 32)
37 #define TDCALL_INVALID_OPERAND	0xc0000100
38 
39 #define TDREPORT_SUBTYPE_0	0
40 
41 static atomic_long_t nr_shared;
42 
43 /* Called from __tdx_hypercall() for unrecoverable failure */
44 noinstr void __noreturn __tdx_hypercall_failed(void)
45 {
46 	instrumentation_begin();
47 	panic("TDVMCALL failed. TDX module bug?");
48 }
49 
50 #ifdef CONFIG_KVM_GUEST
51 long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
52 		       unsigned long p3, unsigned long p4)
53 {
54 	struct tdx_module_args args = {
55 		.r10 = nr,
56 		.r11 = p1,
57 		.r12 = p2,
58 		.r13 = p3,
59 		.r14 = p4,
60 	};
61 
62 	return __tdx_hypercall(&args);
63 }
64 EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
65 #endif
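
/*
 * Usage sketch (illustrative): a KVM paravirt caller passes a hypercall
 * number plus up to four arguments, which land in r10-r14 per the GHCI.
 * The specific hypercall and argument names below are examples only:
 *
 *	long ret = tdx_kvm_hypercall(KVM_HC_SEND_IPI, ipi_bitmap_low,
 *				     ipi_bitmap_high, min_vcpu, icr);
 */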
66 
67 /*
68  * Used for TDX guests to make calls directly to the TD module.  This
69  * should only be used for calls that have no legitimate reason to fail
70  * or where the kernel can not survive the call failing.
71  */
72 static inline void tdcall(u64 fn, struct tdx_module_args *args)
73 {
74 	if (__tdcall_ret(fn, args))
75 		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
76 }
77 
78 /* Read TD-scoped metadata */
79 static inline u64 tdg_vm_rd(u64 field, u64 *value)
80 {
81 	struct tdx_module_args args = {
82 		.rdx = field,
83 	};
84 	u64 ret;
85 
86 	ret = __tdcall_ret(TDG_VM_RD, &args);
87 	*value = args.r8;
88 
89 	return ret;
90 }
91 
92 /* Write TD-scoped metadata */
93 static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
94 {
95 	struct tdx_module_args args = {
96 		.rdx = field,
97 		.r8 = value,
98 		.r9 = mask,
99 	};
100 
101 	return __tdcall(TDG_VM_WR, &args);
102 }
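
/*
 * Usage note: the mask selects which bits of the field are written, so a
 * single control bit can be set without a separate read-modify-write, as
 * done later in this file:
 *
 *	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY);
 */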
103 
104 /**
105  * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
106  *                           subtype 0) using TDG.MR.REPORT TDCALL.
107  * @reportdata: Address of the input buffer which contains user-defined
108  *              REPORTDATA to be included into TDREPORT.
109  * @tdreport: Address of the output buffer to store TDREPORT.
110  *
111  * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
112  * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
113  * It is used in the TDX guest driver module to get the TDREPORT0.
114  *
115  * Return 0 on success, -EINVAL for invalid operands, or -EIO on
116  * other TDCALL failures.
117  */
118 int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
119 {
120 	struct tdx_module_args args = {
121 		.rcx = virt_to_phys(tdreport),
122 		.rdx = virt_to_phys(reportdata),
123 		.r8 = TDREPORT_SUBTYPE_0,
124 	};
125 	u64 ret;
126 
127 	ret = __tdcall(TDG_MR_REPORT, &args);
128 	if (ret) {
129 		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
130 			return -EINVAL;
131 		return -EIO;
132 	}
133 
134 	return 0;
135 }
136 EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
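
/*
 * Usage sketch (illustrative), assuming the UAPI buffer sizes
 * TDX_REPORTDATA_LEN (64) and TDX_REPORT_LEN (1024) from
 * <uapi/linux/tdx-guest.h>; power-of-two kmalloc() allocations are
 * naturally aligned, which satisfies the TDX module's alignment rules:
 *
 *	u8 *reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
 *	u8 *tdreport   = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
 *	int ret = -ENOMEM;
 *
 *	if (reportdata && tdreport)
 *		ret = tdx_mcall_get_report0(reportdata, tdreport);
 */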
137 
138 /**
139  * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
140  *                         hypercall.
141  * @buf: Address of the directly mapped shared kernel buffer which
142  *       contains TDREPORT. The same buffer will be used by VMM to
143  *       store the generated TD Quote output.
144  * @size: Size of the TD Quote buffer (4KB-aligned).
145  *
146  * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
147  * v1.0 specification for more information on GetQuote hypercall.
148  * It is used in the TDX guest driver module to get the TD Quote.
149  *
150  * Return 0 on success or error code on failure.
151  */
152 u64 tdx_hcall_get_quote(u8 *buf, size_t size)
153 {
154 	/* Since buf is a shared memory, set the shared (decrypted) bits */
155 	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
156 }
157 EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
158 
159 static void __noreturn tdx_panic(const char *msg)
160 {
161 	struct tdx_module_args args = {
162 		.r10 = TDX_HYPERCALL_STANDARD,
163 		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
164 		.r12 = 0, /* Error code: 0 is Panic */
165 	};
166 	union {
167 		/* Define register order according to the GHCI */
168 		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };
169 
170 		char str[64];
171 	} message;
172 
173 	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
174 	strtomem_pad(message.str, msg, '\0');
175 
176 	args.r8  = message.r8;
177 	args.r9  = message.r9;
178 	args.r14 = message.r14;
179 	args.r15 = message.r15;
180 	args.rdi = message.rdi;
181 	args.rsi = message.rsi;
182 	args.rbx = message.rbx;
183 	args.rdx = message.rdx;
184 
185 	/*
186 	 * This hypercall should never return and it is not safe
187 	 * to keep the guest running. Call it forever if it
188 	 * happens to return.
189 	 */
190 	while (1)
191 		__tdx_hypercall(&args);
192 }
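
/*
 * Worked example (illustrative): with msg = "fatal", strtomem_pad()
 * zero-fills the rest of the 64-byte buffer, and on little-endian x86
 * the first eight bytes land in r14 ('f' in the low byte), the next
 * eight in r15, and so on in the GHCI register order declared by the
 * union above.
 */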
193 
194 /*
195  * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
196  * that no #VE will be delivered for accesses to TD-private memory.
197  *
198  * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
199  * controls whether the guest will receive such #VEs with the TD attribute
200  * TDX_ATTR_SEPT_VE_DISABLE.
201  *
202  * Newer TDX modules allow the guest to control if it wants to receive SEPT
203  * violation #VEs.
204  *
205  * Check if the feature is available and disable SEPT #VE if possible.
206  *
207  * If the TD is allowed to disable/enable SEPT #VEs, the TDX_ATTR_SEPT_VE_DISABLE
208  * attribute is no longer reliable. It reflects the initial state of the
209  * control for the TD, but it will not be updated if someone (e.g. bootloader)
210  * changes it before the kernel starts. Kernel must check TDCS_TD_CTLS bit to
211  * determine if SEPT #VEs are enabled or disabled.
212  */
213 static void disable_sept_ve(u64 td_attr)
214 {
215 	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
216 	bool debug = td_attr & TDX_ATTR_DEBUG;
217 	u64 config, controls;
218 
219 	/* Is this TD allowed to disable SEPT #VE */
220 	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
221 	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
222 		/* No SEPT #VE controls for the guest: check the attribute */
223 		if (td_attr & TDX_ATTR_SEPT_VE_DISABLE)
224 			return;
225 
226 		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
227 		if (debug)
228 			pr_warn("%s\n", msg);
229 		else
230 			tdx_panic(msg);
231 		return;
232 	}
233 
234 	/* Check if SEPT #VE has been disabled before us */
235 	tdg_vm_rd(TDCS_TD_CTLS, &controls);
236 	if (controls & TD_CTLS_PENDING_VE_DISABLE)
237 		return;
238 
239 	/* Keep #VEs enabled for splats in debugging environments */
240 	if (debug)
241 		return;
242 
243 	/* Disable SEPT #VEs */
244 	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
245 		  TD_CTLS_PENDING_VE_DISABLE);
246 }
247 
248 /*
249  * TDX 1.0 generates a #VE when accessing topology-related CPUID leafs (0xB and
250  * 0x1F) and the X2APIC_APICID MSR. The kernel returns all zeros on CPUID #VEs.
251  * In practice, this means that the kernel can only boot with a plain topology.
252  * Any complications will cause problems.
253  *
254  * The ENUM_TOPOLOGY feature allows the VMM to provide topology information.
255  * Enabling the feature eliminates topology-related #VEs: the TDX module
256  * virtualizes accesses to the CPUID leafs and the MSR.
257  *
258  * Enable ENUM_TOPOLOGY if it is available.
259  */
260 static void enable_cpu_topology_enumeration(void)
261 {
262 	u64 configured;
263 
264 	/* Has the VMM provided a valid topology configuration? */
265 	tdg_vm_rd(TDCS_TOPOLOGY_ENUM_CONFIGURED, &configured);
266 	if (!configured) {
267 		pr_err("VMM did not configure X2APIC_IDs properly\n");
268 		return;
269 	}
270 
271 	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_ENUM_TOPOLOGY, TD_CTLS_ENUM_TOPOLOGY);
272 }
273 
274 static void reduce_unnecessary_ve(void)
275 {
276 	u64 err = tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_REDUCE_VE, TD_CTLS_REDUCE_VE);
277 
278 	if (err == TDX_SUCCESS)
279 		return;
280 
281 	/*
282 	 * Enabling REDUCE_VE includes ENUM_TOPOLOGY. Only try to
283 	 * enable ENUM_TOPOLOGY if REDUCE_VE was not successful.
284 	 */
285 	enable_cpu_topology_enumeration();
286 }
287 
288 static void tdx_setup(u64 *cc_mask)
289 {
290 	struct tdx_module_args args = {};
291 	unsigned int gpa_width;
292 	u64 td_attr;
293 
294 	/*
295 	 * TDINFO TDX module call is used to get the TD execution environment
296 	 * information like GPA width, number of available vcpus, debug mode
297 	 * information, etc. More details about the ABI can be found in TDX
298 	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
299 	 * [TDG.VP.INFO].
300 	 */
301 	tdcall(TDG_VP_INFO, &args);
302 
303 	/*
304 	 * The highest bit of a guest physical address is the "sharing" bit.
305 	 * Set it for shared pages and clear it for private pages.
306 	 *
307 	 * The GPA width that comes out of this call is critical. TDX guests
308 	 * can not meaningfully run without it.
309 	 */
310 	gpa_width = args.rcx & GENMASK(5, 0);
311 	*cc_mask = BIT_ULL(gpa_width - 1);
312 
313 	td_attr = args.rdx;
314 
315 	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
316 	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);
317 
318 	disable_sept_ve(td_attr);
319 
320 	reduce_unnecessary_ve();
321 }
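
/*
 * Worked example (illustrative): a TD with gpa_width == 52 gets
 * cc_mask == BIT_ULL(51), making GPA bit 51 the shared bit. Marking an
 * address shared is then just:
 *
 *	phys_addr_t shared_gpa = cc_mkdec(gpa);
 *
 * which is exactly what tdx_map_gpa() below does to the start and end
 * of a range before the MapGPA hypercall.
 */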
322 
323 /*
324  * The TDX module spec states that #VE may be injected for a limited set of
325  * reasons:
326  *
327  *  - Emulation of the architectural #VE injection on EPT violation;
328  *
329  *  - As a result of guest TD execution of a disallowed instruction,
330  *    a disallowed MSR access, or CPUID virtualization;
331  *
332  *  - A notification to the guest TD about anomalous behavior;
333  *
334  * The last one is opt-in and is not used by the kernel.
335  *
336  * The Intel Software Developer's Manual describes cases when instruction
337  * length field can be used in section "Information for VM Exits Due to
338  * Instruction Execution".
339  *
340  * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
341  * information if #VE occurred due to instruction execution, but not for EPT
342  * violations.
343  */
344 static int ve_instr_len(struct ve_info *ve)
345 {
346 	switch (ve->exit_reason) {
347 	case EXIT_REASON_HLT:
348 	case EXIT_REASON_MSR_READ:
349 	case EXIT_REASON_MSR_WRITE:
350 	case EXIT_REASON_CPUID:
351 	case EXIT_REASON_IO_INSTRUCTION:
352 		/* It is safe to use ve->instr_len for #VE due to instructions */
353 		return ve->instr_len;
354 	case EXIT_REASON_EPT_VIOLATION:
355 		/*
356 		 * For EPT violations, ve->instr_len is not defined. For those,
357 		 * the kernel must decode instructions manually and should not
358 		 * be using this function.
359 		 */
360 		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
361 		return 0;
362 	default:
363 		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
364 		return ve->instr_len;
365 	}
366 }
367 
368 static u64 __cpuidle __halt(const bool irq_disabled)
369 {
370 	struct tdx_module_args args = {
371 		.r10 = TDX_HYPERCALL_STANDARD,
372 		.r11 = hcall_func(EXIT_REASON_HLT),
373 		.r12 = irq_disabled,
374 	};
375 
376 	/*
377 	 * Emulate HLT operation via hypercall. More info about ABI
378 	 * can be found in TDX Guest-Host-Communication Interface
379 	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
380 	 *
381 	 * The VMM uses the "IRQ disabled" param to understand IRQ
382 	 * enabled status (RFLAGS.IF) of the TD guest and to determine
383 	 * whether or not it should schedule the halted vCPU if an
384 	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
385 	 * can keep the vCPU in virtual HLT, even if an IRQ is
386 	 * pending, without hanging/breaking the guest.
387 	 */
388 	return __tdx_hypercall(&args);
389 }
390 
391 static int handle_halt(struct ve_info *ve)
392 {
393 	const bool irq_disabled = irqs_disabled();
394 
395 	if (__halt(irq_disabled))
396 		return -EIO;
397 
398 	return ve_instr_len(ve);
399 }
400 
401 void __cpuidle tdx_safe_halt(void)
402 {
403 	const bool irq_disabled = false;
404 
405 	/*
406 	 * HLT emulation should not fail; use WARN_ONCE() to report it if it does.
407 	 */
408 	if (__halt(irq_disabled))
409 		WARN_ONCE(1, "HLT instruction emulation failed\n");
410 }
411 
412 static int read_msr(struct pt_regs *regs, struct ve_info *ve)
413 {
414 	struct tdx_module_args args = {
415 		.r10 = TDX_HYPERCALL_STANDARD,
416 		.r11 = hcall_func(EXIT_REASON_MSR_READ),
417 		.r12 = regs->cx,
418 	};
419 
420 	/*
421 	 * Emulate the MSR read via hypercall. More info about ABI
422 	 * can be found in TDX Guest-Host-Communication Interface
423 	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
424 	 */
425 	if (__tdx_hypercall(&args))
426 		return -EIO;
427 
428 	regs->ax = lower_32_bits(args.r11);
429 	regs->dx = upper_32_bits(args.r11);
430 	return ve_instr_len(ve);
431 }
432 
433 static int write_msr(struct pt_regs *regs, struct ve_info *ve)
434 {
435 	struct tdx_module_args args = {
436 		.r10 = TDX_HYPERCALL_STANDARD,
437 		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
438 		.r12 = regs->cx,
439 		.r13 = (u64)regs->dx << 32 | regs->ax,
440 	};
441 
442 	/*
443 	 * Emulate the MSR write via hypercall. More info about ABI
444 	 * can be found in TDX Guest-Host-Communication Interface
445 	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
446 	 */
447 	if (__tdx_hypercall(&args))
448 		return -EIO;
449 
450 	return ve_instr_len(ve);
451 }
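
/*
 * Worked example (illustrative): a WRMSR of 0x1122334455667788 arrives
 * with regs->dx == 0x11223344 and regs->ax == 0x55667788; the shift-or
 * above rebuilds the 64-bit value for r13, and read_msr() performs the
 * reverse split with lower_32_bits()/upper_32_bits().
 */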
452 
453 static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
454 {
455 	struct tdx_module_args args = {
456 		.r10 = TDX_HYPERCALL_STANDARD,
457 		.r11 = hcall_func(EXIT_REASON_CPUID),
458 		.r12 = regs->ax,
459 		.r13 = regs->cx,
460 	};
461 
462 	/*
463 	 * Only allow VMM to control range reserved for hypervisor
464 	 * communication.
465 	 *
466 	 * Return all-zeros for any CPUID outside the range. It matches CPU
467 	 * behaviour for an unsupported leaf.
468 	 */
469 	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
470 		regs->ax = regs->bx = regs->cx = regs->dx = 0;
471 		return ve_instr_len(ve);
472 	}
473 
474 	/*
475 	 * Emulate the CPUID instruction via a hypercall. More info about
476 	 * ABI can be found in TDX Guest-Host-Communication Interface
477 	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
478 	 */
479 	if (__tdx_hypercall(&args))
480 		return -EIO;
481 
482 	/*
483 	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
484 	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
485 	 * So copy the register contents back to pt_regs.
486 	 */
487 	regs->ax = args.r12;
488 	regs->bx = args.r13;
489 	regs->cx = args.r14;
490 	regs->dx = args.r15;
491 
492 	return ve_instr_len(ve);
493 }
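
/*
 * Worked example (illustrative): leaf 0x40000000, the hypervisor
 * signature leaf, falls inside the reserved range and is forwarded to
 * the VMM, while a CPUID #VE on any leaf outside the range yields
 * all-zero output, matching hardware behaviour for an unsupported leaf.
 */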
494 
495 static bool mmio_read(int size, unsigned long addr, unsigned long *val)
496 {
497 	struct tdx_module_args args = {
498 		.r10 = TDX_HYPERCALL_STANDARD,
499 		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
500 		.r12 = size,
501 		.r13 = EPT_READ,
502 		.r14 = addr,
503 	};
504 
505 	if (__tdx_hypercall(&args))
506 		return false;
507 
508 	*val = args.r11;
509 	return true;
510 }
511 
512 static bool mmio_write(int size, unsigned long addr, unsigned long val)
513 {
514 	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
515 			       EPT_WRITE, addr, val);
516 }
517 
518 static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
519 {
520 	unsigned long *reg, val, vaddr;
521 	char buffer[MAX_INSN_SIZE];
522 	enum insn_mmio_type mmio;
523 	struct insn insn = {};
524 	int size, extend_size;
525 	u8 extend_val = 0;
526 
527 	/* Only in-kernel MMIO is supported */
528 	if (WARN_ON_ONCE(user_mode(regs)))
529 		return -EFAULT;
530 
531 	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
532 		return -EFAULT;
533 
534 	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
535 		return -EINVAL;
536 
537 	mmio = insn_decode_mmio(&insn, &size);
538 	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
539 		return -EINVAL;
540 
541 	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
542 		reg = insn_get_modrm_reg_ptr(&insn, regs);
543 		if (!reg)
544 			return -EINVAL;
545 	}
546 
547 	if (!fault_in_kernel_space(ve->gla)) {
548 		WARN_ONCE(1, "Access to userspace address is not supported");
549 		return -EINVAL;
550 	}
551 
552 	/*
553 	 * Reject EPT violation #VEs that split pages.
554 	 *
555 	 * MMIO accesses are supposed to be naturally aligned and therefore
556 	 * never cross page boundaries. Seeing split page accesses indicates
557 	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
558 	 *
559 	 * load_unaligned_zeropad() will recover using exception fixups.
560 	 */
561 	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
562 	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
563 		return -EFAULT;
564 
565 	/* Handle writes first */
566 	switch (mmio) {
567 	case INSN_MMIO_WRITE:
568 		memcpy(&val, reg, size);
569 		if (!mmio_write(size, ve->gpa, val))
570 			return -EIO;
571 		return insn.length;
572 	case INSN_MMIO_WRITE_IMM:
573 		val = insn.immediate.value;
574 		if (!mmio_write(size, ve->gpa, val))
575 			return -EIO;
576 		return insn.length;
577 	case INSN_MMIO_READ:
578 	case INSN_MMIO_READ_ZERO_EXTEND:
579 	case INSN_MMIO_READ_SIGN_EXTEND:
580 		/* Reads are handled below */
581 		break;
582 	case INSN_MMIO_MOVS:
583 	case INSN_MMIO_DECODE_FAILED:
584 		/*
585 		 * MMIO was accessed with an instruction that could not be
586 		 * decoded or handled properly. It was likely not using io.h
587 		 * helpers or accessed MMIO accidentally.
588 		 */
589 		return -EINVAL;
590 	default:
591 		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
592 		return -EINVAL;
593 	}
594 
595 	/* Handle reads */
596 	if (!mmio_read(size, ve->gpa, &val))
597 		return -EIO;
598 
599 	switch (mmio) {
600 	case INSN_MMIO_READ:
601 		/* Zero-extend for 32-bit operation */
602 		extend_size = size == 4 ? sizeof(*reg) : 0;
603 		break;
604 	case INSN_MMIO_READ_ZERO_EXTEND:
605 		/* Zero extend based on operand size */
606 		extend_size = insn.opnd_bytes;
607 		break;
608 	case INSN_MMIO_READ_SIGN_EXTEND:
609 		/* Sign extend based on operand size */
610 		extend_size = insn.opnd_bytes;
611 		if (size == 1 && val & BIT(7))
612 			extend_val = 0xFF;
613 		else if (size > 1 && val & BIT(15))
614 			extend_val = 0xFF;
615 		break;
616 	default:
617 		/* All other cases have to be covered by the first switch() */
618 		WARN_ON_ONCE(1);
619 		return -EINVAL;
620 	}
621 
622 	if (extend_size)
623 		memset(reg, extend_val, extend_size);
624 	memcpy(reg, &val, size);
625 	return insn.length;
626 }
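
/*
 * Worked example (illustrative): for a MOVSX load of the byte 0x80 into
 * a 32-bit register, size == 1 and insn.opnd_bytes == 4, so extend_val
 * == 0xFF; the handler memset()s the destination to 0xFF and then
 * copies 0x80 over the low byte, producing 0xFFFFFF80 in the low
 * 32 bits, just as MOVSX would.
 */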
627 
628 static bool handle_in(struct pt_regs *regs, int size, int port)
629 {
630 	struct tdx_module_args args = {
631 		.r10 = TDX_HYPERCALL_STANDARD,
632 		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
633 		.r12 = size,
634 		.r13 = PORT_READ,
635 		.r14 = port,
636 	};
637 	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
638 	bool success;
639 
640 	/*
641 	 * Emulate the I/O read via hypercall. More info about ABI can be found
642 	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
643 	 * "TDG.VP.VMCALL<Instruction.IO>".
644 	 */
645 	success = !__tdx_hypercall(&args);
646 
647 	/* Update part of the register affected by the emulated instruction */
648 	regs->ax &= ~mask;
649 	if (success)
650 		regs->ax |= args.r11 & mask;
651 
652 	return success;
653 }
654 
655 static bool handle_out(struct pt_regs *regs, int size, int port)
656 {
657 	u64 mask = GENMASK(BITS_PER_BYTE * size - 1, 0);
658 
659 	/*
660 	 * Emulate the I/O write via hypercall. More info about ABI can be found
661 	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
662 	 * "TDG.VP.VMCALL<Instruction.IO>".
663 	 */
664 	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
665 			       PORT_WRITE, port, regs->ax & mask);
666 }
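
/*
 * Worked example (illustrative): for "outw %ax, $0x70", size == 2 and
 * mask == 0xFFFF, so only the low 16 bits of regs->ax reach the VMM;
 * handle_in() uses the same mask so an emulated IN updates only the
 * bits the real instruction would write.
 */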
667 
668 /*
669  * Emulate I/O using hypercall.
670  *
671  * Assumes the IO instruction was using ax, which is enforced
672  * by the standard io.h macros.
673  *
674  * Return the instruction length on success, or -errno on failure.
675  */
676 static int handle_io(struct pt_regs *regs, struct ve_info *ve)
677 {
678 	u32 exit_qual = ve->exit_qual;
679 	int size, port;
680 	bool in, ret;
681 
682 	if (VE_IS_IO_STRING(exit_qual))
683 		return -EIO;
684 
685 	in   = VE_IS_IO_IN(exit_qual);
686 	size = VE_GET_IO_SIZE(exit_qual);
687 	port = VE_GET_PORT_NUM(exit_qual);
688 
690 	if (in)
691 		ret = handle_in(regs, size, port);
692 	else
693 		ret = handle_out(regs, size, port);
694 	if (!ret)
695 		return -EIO;
696 
697 	return ve_instr_len(ve);
698 }
699 
700 /*
701  * Early #VE exception handler. Only handles a subset of port I/O.
702  * Intended only for earlyprintk. Returns false on failure.
703  */
704 __init bool tdx_early_handle_ve(struct pt_regs *regs)
705 {
706 	struct ve_info ve;
707 	int insn_len;
708 
709 	tdx_get_ve_info(&ve);
710 
711 	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
712 		return false;
713 
714 	insn_len = handle_io(regs, &ve);
715 	if (insn_len < 0)
716 		return false;
717 
718 	regs->ip += insn_len;
719 	return true;
720 }
721 
722 void tdx_get_ve_info(struct ve_info *ve)
723 {
724 	struct tdx_module_args args = {};
725 
726 	/*
727 	 * Called during #VE handling to retrieve the #VE info from the
728 	 * TDX module.
729 	 *
730 	 * This has to be called early in #VE handling.  A "nested" #VE which
731 	 * occurs before this will raise a #DF and is not recoverable.
732 	 *
733 	 * The call retrieves the #VE info from the TDX module, which also
734 	 * clears the "#VE valid" flag. This must be done before anything else
735 	 * because any #VE that occurs while the valid flag is set will lead to
736 	 * #DF.
737 	 *
738 	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
739 	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
740 	 */
741 	tdcall(TDG_VP_VEINFO_GET, &args);
742 
743 	/* Transfer the output parameters */
744 	ve->exit_reason = args.rcx;
745 	ve->exit_qual   = args.rdx;
746 	ve->gla         = args.r8;
747 	ve->gpa         = args.r9;
748 	ve->instr_len   = lower_32_bits(args.r10);
749 	ve->instr_info  = upper_32_bits(args.r10);
750 }
751 
752 /*
753  * Handle the user initiated #VE.
754  *
755  * On success, returns the number of bytes RIP should be incremented (>=0)
756  * or -errno on error.
757  */
758 static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
759 {
760 	switch (ve->exit_reason) {
761 	case EXIT_REASON_CPUID:
762 		return handle_cpuid(regs, ve);
763 	default:
764 		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
765 		return -EIO;
766 	}
767 }
768 
769 static inline bool is_private_gpa(u64 gpa)
770 {
771 	return gpa == cc_mkenc(gpa);
772 }
773 
774 /*
775  * Handle the kernel #VE.
776  *
777  * On success, returns the number of bytes RIP should be incremented (>=0)
778  * or -errno on error.
779  */
780 static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
781 {
782 	switch (ve->exit_reason) {
783 	case EXIT_REASON_HLT:
784 		return handle_halt(ve);
785 	case EXIT_REASON_MSR_READ:
786 		return read_msr(regs, ve);
787 	case EXIT_REASON_MSR_WRITE:
788 		return write_msr(regs, ve);
789 	case EXIT_REASON_CPUID:
790 		return handle_cpuid(regs, ve);
791 	case EXIT_REASON_EPT_VIOLATION:
792 		if (is_private_gpa(ve->gpa))
793 			panic("Unexpected EPT-violation on private memory.");
794 		return handle_mmio(regs, ve);
795 	case EXIT_REASON_IO_INSTRUCTION:
796 		return handle_io(regs, ve);
797 	default:
798 		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
799 		return -EIO;
800 	}
801 }
802 
803 bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
804 {
805 	int insn_len;
806 
807 	if (user_mode(regs))
808 		insn_len = virt_exception_user(regs, ve);
809 	else
810 		insn_len = virt_exception_kernel(regs, ve);
811 	if (insn_len < 0)
812 		return false;
813 
814 	/* After successful #VE handling, move the IP */
815 	regs->ip += insn_len;
816 
817 	return true;
818 }
819 
820 static bool tdx_tlb_flush_required(bool private)
821 {
822 	/*
823 	 * TDX guest is responsible for flushing TLB on private->shared
824 	 * transition. VMM is responsible for flushing on shared->private.
825 	 *
826 	 * The VMM _can't_ flush private addresses as it can't generate PAs
827 	 * with the guest's HKID.  Shared memory isn't subject to integrity
828 	 * checking, i.e. the VMM doesn't need to flush for its own protection.
829 	 *
830 	 * There's no need to flush when converting from shared to private,
831 	 * as flushing is the VMM's responsibility in this case, e.g. it must
832 	 * flush to avoid integrity failures in the face of a buggy or
833 	 * malicious guest.
834 	 */
835 	return !private;
836 }
837 
838 static bool tdx_cache_flush_required(void)
839 {
840 	/*
841 	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
842 	 * TDX doesn't have such capability.
843 	 *
844 	 * Flush cache unconditionally.
845 	 */
846 	return true;
847 }
848 
849 /*
850  * Notify the VMM about page mapping conversion. More info about ABI
851  * can be found in TDX Guest-Host-Communication Interface (GHCI),
852  * section "TDG.VP.VMCALL<MapGPA>".
853  */
854 static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
855 {
856 	/* Retrying the hypercall a second time should succeed; use 3 just in case */
857 	const int max_retries_per_page = 3;
858 	int retry_count = 0;
859 
860 	if (!enc) {
861 		/* Set the shared (decrypted) bits: */
862 		start |= cc_mkdec(0);
863 		end   |= cc_mkdec(0);
864 	}
865 
866 	while (retry_count < max_retries_per_page) {
867 		struct tdx_module_args args = {
868 			.r10 = TDX_HYPERCALL_STANDARD,
869 			.r11 = TDVMCALL_MAP_GPA,
870 			.r12 = start,
871 			.r13 = end - start };
872 
873 		u64 map_fail_paddr;
874 		u64 ret = __tdx_hypercall(&args);
875 
876 		if (ret != TDVMCALL_STATUS_RETRY)
877 			return !ret;
878 		/*
879 		 * The guest must retry the operation for the pages in the
880 		 * region starting at the GPA specified in R11. R11 comes
881 		 * from the untrusted VMM. Sanity check it.
882 		 */
883 		map_fail_paddr = args.r11;
884 		if (map_fail_paddr < start || map_fail_paddr >= end)
885 			return false;
886 
887 		/* "Consume" a retry without forward progress */
888 		if (map_fail_paddr == start) {
889 			retry_count++;
890 			continue;
891 		}
892 
893 		start = map_fail_paddr;
894 		retry_count = 0;
895 	}
896 
897 	return false;
898 }
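
/*
 * Worked example (illustrative): when converting [A, A + 2M) and the
 * VMM returns TDVMCALL_STATUS_RETRY with r11 == A + 1M, forward
 * progress was made, so the loop restarts at A + 1M with retry_count
 * reset to 0. Only repeated RETRYs at the same start GPA consume the
 * three-attempt budget.
 */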
899 
900 /*
901  * Inform the VMM of the guest's intent for this physical page: shared with
902  * the VMM or private to the guest.  The VMM is expected to change its mapping
903  * of the page in response.
904  */
905 static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
906 {
907 	phys_addr_t start = __pa(vaddr);
908 	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);
909 
910 	if (!tdx_map_gpa(start, end, enc))
911 		return false;
912 
913 	/* shared->private conversion requires memory to be accepted before use */
914 	if (enc)
915 		return tdx_accept_memory(start, end);
916 
917 	return true;
918 }
919 
920 static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
921 					 bool enc)
922 {
923 	/*
924 	 * Only handle shared->private conversion here.
925 	 * See the comment in tdx_early_init().
926 	 */
927 	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
928 		return -EIO;
929 
930 	return 0;
931 }
932 
933 static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
934 					 bool enc)
935 {
936 	/*
937 	 * Only handle private->shared conversion here.
938 	 * See the comment in tdx_early_init().
939 	 */
940 	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
941 		return -EIO;
942 
943 	if (enc)
944 		atomic_long_sub(numpages, &nr_shared);
945 	else
946 		atomic_long_add(numpages, &nr_shared);
947 
948 	return 0;
949 }
950 
951 /* Stop new private<->shared conversions */
952 static void tdx_kexec_begin(void)
953 {
954 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
955 		return;
956 
957 	/*
958 	 * Crash kernel reaches here with interrupts disabled: can't wait for
959 	 * conversions to finish.
960 	 *
961 	 * If a race happened, just report it and proceed.
962 	 */
963 	if (!set_memory_enc_stop_conversion())
964 		pr_warn("Failed to stop shared<->private conversions\n");
965 }
966 
967 /* Walk direct mapping and convert all shared memory back to private */
968 static void tdx_kexec_finish(void)
969 {
970 	unsigned long addr, end;
971 	long found = 0, shared;
972 
973 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
974 		return;
975 
976 	lockdep_assert_irqs_disabled();
977 
978 	addr = PAGE_OFFSET;
979 	end  = PAGE_OFFSET + get_max_mapped();
980 
981 	while (addr < end) {
982 		unsigned long size;
983 		unsigned int level;
984 		pte_t *pte;
985 
986 		pte = lookup_address(addr, &level);
987 		size = page_level_size(level);
988 
989 		if (pte && pte_decrypted(*pte)) {
990 			int pages = size / PAGE_SIZE;
991 
992 			/*
993 			 * Touching memory with shared bit set triggers implicit
994 			 * conversion to shared.
995 			 *
996 			 * Make sure nobody touches the shared range from
997 			 * now on.
998 			 */
999 			set_pte(pte, __pte(0));
1000 
1001 			/*
1002 			 * Memory encryption state persists across kexec.
1003 			 * If tdx_enc_status_changed() fails in the first
1004 			 * kernel, it leaves memory in an unknown state.
1005 			 *
1006 			 * If that memory remains shared, accessing it in the
1007 			 * *next* kernel through a private mapping will result
1008 			 * in an unrecoverable guest shutdown.
1009 			 *
1010 			 * The kdump kernel boot is not impacted as it uses
1011 			 * a pre-reserved memory range that is always private.
1012 			 * However, gathering crash information could lead to
1013 			 * a crash if it accesses unconverted memory through
1014 			 * a private mapping which is possible when accessing
1015 			 * that memory through /proc/vmcore, for example.
1016 			 *
1017 			 * In all cases, print error info in order to leave
1018 			 * enough bread crumbs for debugging.
1019 			 */
1020 			if (!tdx_enc_status_changed(addr, pages, true)) {
1021 				pr_err("Failed to unshare range %#lx-%#lx\n",
1022 				       addr, addr + size);
1023 			}
1024 
1025 			found += pages;
1026 		}
1027 
1028 		addr += size;
1029 	}
1030 
1031 	__flush_tlb_all();
1032 
1033 	shared = atomic_long_read(&nr_shared);
1034 	if (shared != found) {
1035 		pr_err("shared page accounting is off\n");
1036 		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
1037 	}
1038 }
1039 
1040 static __init void tdx_announce(void)
1041 {
1042 	struct tdx_module_args args = {};
1043 	u64 controls;
1044 
1045 	pr_info("Guest detected\n");
1046 
1047 	tdcall(TDG_VP_INFO, &args);
1048 	tdx_dump_attributes(args.rdx);
1049 
1050 	tdg_vm_rd(TDCS_TD_CTLS, &controls);
1051 	tdx_dump_td_ctls(controls);
1052 }
1053 
1054 void __init tdx_early_init(void)
1055 {
1056 	u64 cc_mask;
1057 	u32 eax, sig[3];
1058 
1059 	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);
1060 
1061 	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
1062 		return;
1063 
1064 	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
1065 
1066 	/* TSC is the only reliable clock in TDX guest */
1067 	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
1068 
1069 	cc_vendor = CC_VENDOR_INTEL;
1070 
1071 	/* Configure the TD */
1072 	tdx_setup(&cc_mask);
1073 
1074 	cc_set_mask(cc_mask);
1075 
1076 	/*
1077 	 * All bits above the GPA width are reserved and the kernel treats the
1078 	 * shared bit as a flag, not as part of the physical address.
1079 	 *
1080 	 * Adjust physical mask to only cover valid GPA bits.
1081 	 */
1082 	physical_mask &= cc_mask - 1;
1083 
1084 	/*
1085 	 * The kernel mapping should match the TDX metadata for the page.
1086 	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
1087 	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
1088 	 * things happen on mismatch:
1089 	 *
1090 	 *   - Private mapping => Shared Page  == Guest shutdown
1091 	 *   - Shared mapping  => Private Page == Recoverable #VE
1092 	 *
1093 	 * guest.enc_status_change_prepare() converts the page from
1094 	 * shared=>private before the mapping becomes private.
1095 	 *
1096 	 * guest.enc_status_change_finish() converts the page from
1097 	 * private=>shared after the mapping becomes shared.
1098 	 *
1099 	 * In both cases there is a temporary shared mapping to a private page,
1100 	 * which can result in a #VE.  But, there is never a private mapping to
1101 	 * a shared page.
1102 	 */
1103 	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
1104 	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
1105 
1106 	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
1107 	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;
1108 
1109 	x86_platform.guest.enc_kexec_begin	     = tdx_kexec_begin;
1110 	x86_platform.guest.enc_kexec_finish	     = tdx_kexec_finish;
1111 
1112 	/*
1113 	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
1114 	 * bringup low level code. That raises #VE which cannot be handled
1115 	 * there.
1116 	 *
1117 	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
1118 	 * implemented separately in the low level startup ASM code.
1119 	 * Until that is in place, disable parallel bringup for TDX.
1120 	 */
1121 	x86_cpuinit.parallel_bringup = false;
1122 
1123 	tdx_announce();
1124 }
1125