xref: /linux/arch/x86/include/asm/sev-internal.h (revision 681e2901330c5c27ce8b58dfdd92a3c339e47caf)
/* SPDX-License-Identifier: GPL-2.0 */

#define DR7_RESET_VALUE        0x400

extern struct ghcb boot_ghcb_page;
extern struct ghcb *boot_ghcb;
extern u64 sev_hv_features;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself causes another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-CPU GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC exception, its handler might overwrite the
	 * contents of the GHCB written by the first handler. To avoid this the
	 * content of the GHCB is saved and restored when the GHCB is detected
	 * to be in use already. See the usage sketch near __sev_get_ghcb()
	 * below.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests. See the illustrative
	 * sketch below this structure.
	 */
	unsigned long dr7;
};
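
/*
 * Illustrative sketch (not part of this header): roughly how a #VC DR7
 * write intercept is expected to use the cached dr7 field above. The
 * helper name below is hypothetical and details are simplified; the real
 * handling lives in the #VC exception code.
 *
 *	static void cache_dr7_write_sketch(unsigned long val)
 *	{
 *		struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
 *
 *		data->dr7 = val;	// cache only, never reaches hardware DR7
 *	}
 *
 * A DR7 read then returns data->dr7, or DR7_RESET_VALUE before the
 * per-CPU data is set up.
 */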

struct ghcb_state {
	struct ghcb *ghcb;
};

extern struct svsm_ca boot_svsm_ca_page;

struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
void __sev_put_ghcb(struct ghcb_state *state);
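
/*
 * Typical usage of the two helpers above (illustrative sketch only):
 * __sev_get_ghcb() hands out the per-CPU GHCB; if it is already active
 * (e.g. #VC -> NMI -> #VC) its contents are saved to backup_ghcb first,
 * and __sev_put_ghcb() restores them afterwards.
 *
 *	struct ghcb_state state;
 *	struct ghcb *ghcb;
 *
 *	ghcb = __sev_get_ghcb(&state);
 *	// ... fill the GHCB and exit to the hypervisor ...
 *	__sev_put_ghcb(&state);
 */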

DECLARE_PER_CPU(struct sev_es_runtime_data *, runtime_data);
DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
			   unsigned long npages, enum psc_op op);
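
/*
 * Illustrative early page-state change (sketch only, assuming the usual
 * SNP page-state constants from <asm/sev-common.h>): flip a single page
 * to shared before the full page-state-change machinery is available.
 *
 *	unsigned long vaddr = (unsigned long)ptr;
 *
 *	early_set_pages_state(vaddr, __pa((void *)vaddr), 1,
 *			      SNP_PAGE_STATE_SHARED);
 */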

void __noreturn sev_es_terminate(unsigned int set, unsigned int reason);

DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
DECLARE_PER_CPU(u64, svsm_caa_pa);

extern struct svsm_ca *boot_svsm_caa;
extern u64 boot_svsm_caa_pa;

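/*
 * Return the SVSM Calling Area (or its physical address) for the current
 * context: the per-CPU CA once per-CPU calling areas are in use
 * (sev_cfg.use_cas), otherwise the single boot CA set up early.
 */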
static __always_inline struct svsm_ca *svsm_get_caa(void)
{
	if (sev_cfg.use_cas)
		return this_cpu_read(svsm_caa);
	else
		return boot_svsm_caa;
}

static __always_inline u64 svsm_get_caa_pa(void)
{
	if (sev_cfg.use_cas)
		return this_cpu_read(svsm_caa_pa);
	else
		return boot_svsm_caa_pa;
}

int svsm_perform_call_protocol(struct svsm_call *call);
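
/*
 * Illustrative call-protocol usage (sketch only; the SVSM_CORE_CALL()
 * encoding and the svsm_call fields used here are assumptions based on
 * <asm/sev.h>, not defined in this header):
 *
 *	struct svsm_call call = {};
 *	int ret;
 *
 *	call.caa = svsm_get_caa();
 *	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
 *	call.rcx = pa;			// physical address argument
 *	ret = svsm_perform_call_protocol(&call);
 */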

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
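
/*
 * The two helpers above implement the GHCB MSR protocol. A minimal
 * sketch (assuming the GHCB_MSR_TERM_REQ/GHCB_SEV_TERM_REASON encoding
 * helpers from <asm/sev-common.h>) of requesting guest termination,
 * which is what sev_es_terminate() does:
 *
 *	u64 val = GHCB_MSR_TERM_REQ;
 *
 *	val |= GHCB_SEV_TERM_REASON(set, reason);
 *	sev_es_wr_ghcb_msr(val);
 *	VMGEXIT();
 */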

enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
				   struct es_em_ctxt *ctxt,
				   u64 exit_code, u64 exit_info_1,
				   u64 exit_info_2);
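
/*
 * Illustrative GHCB-based exit from a #VC handler (sketch only, assuming
 * the ghcb_set_*() accessors and SVM_EXIT_* codes from <asm/svm.h>):
 *
 *	enum es_result ret;
 *
 *	ghcb_set_rax(ghcb, leaf);
 *	ghcb_set_rcx(ghcb, subleaf);
 *	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
 *	if (ret == ES_OK)
 *		result = ghcb->save.rax;
 */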

void snp_register_ghcb_early(unsigned long paddr);
bool sev_es_negotiate_protocol(void);
bool sev_es_check_cpu_features(void);
u64 get_hv_features(void);

const struct snp_cpuid_table *snp_cpuid_get_table(void);