// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/bug.h>
#include <linux/kernel.h>

#include <asm/cpu_entry_area.h>
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/sev.h>
#include <asm/sev-internal.h>

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
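/*
 * Rough sketch of the resulting layout (illustrative only, derived from
 * the code in __sev_es_ist_enter() below):
 *
 *   top:     regs->sp if the NMI hit the #VC IST stack, otherwise the
 *            previous IST entry (old_ist)
 *            +---------------------------+
 *            | old_ist saved here        |  8 bytes
 *   new_ist: +---------------------------+
 *            | stack space for a nested  |
 *            | #VC exception             |
 *            :   ... grows downward ...  :
 *
 * The saved value is always the previous TSS IST entry, which allows
 * __sev_es_ist_exit() to restore it unconditionally.
 */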
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

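	/*
	 * The entry should have been adjusted by __sev_es_ist_enter() and
	 * thus never point at the unmodified stack top when this runs.
	 */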
	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

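/*
 * Tell the hypervisor that the guest is done handling an NMI. With
 * encrypted register state the hypervisor cannot track NMI completion
 * itself, so the guest reports it explicitly via the NMI_COMPLETE
 * VMGEXIT before the hypervisor may inject the next NMI.
 */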
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

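	/* Grab this CPU's GHCB */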
	ghcb = __sev_get_ghcb(&state);

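	/* Build an SVM_VMGEXIT_NMI_COMPLETE request in the GHCB */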
	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

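	/* Point the GHCB MSR at the GHCB and exit to the hypervisor */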
	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

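	/* Release the GHCB again */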
	__sev_put_ghcb(&state);
}