/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_TDX_H
#define __KVM_X86_VMX_TDX_H

#include "tdx_arch.h"
#include "tdx_errno.h"

#ifdef CONFIG_KVM_INTEL_TDX
#include "common.h"

void tdx_hardware_setup(void);
int tdx_bringup(void);
void tdx_cleanup(void);

extern bool enable_tdx;

/* TDX module hardware states. These follow the TDX module OP_STATEs. */
enum kvm_tdx_state {
	TD_STATE_UNINITIALIZED = 0,
	TD_STATE_INITIALIZED,
	TD_STATE_RUNNABLE,
};

struct kvm_tdx {
	struct kvm kvm;

	struct misc_cg *misc_cg;
	int hkid;
	enum kvm_tdx_state state;

	u64 attributes;
	u64 xfam;

	u64 tsc_offset;
	u64 tsc_multiplier;

	struct tdx_td td;

	/* For KVM_TDX_INIT_MEM_REGION. */
	atomic64_t nr_premapped;
	/*
	 * Prevent vCPUs from entering the TD so that SEPT-zap-related
	 * SEAMCALLs do not contend with tdh_vp_enter() and TDCALLs.
	 * Set/unset is protected by kvm->mmu_lock.
	 */
	bool wait_for_sept_zap;
};
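
/*
 * struct kvm is deliberately the first member so the containing struct
 * kvm_tdx can be recovered from a plain struct kvm pointer.  A minimal
 * sketch of the conventional conversion (the actual helper, if any, may
 * live elsewhere under a different name):
 *
 *	static inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
 *	{
 *		return container_of(kvm, struct kvm_tdx, kvm);
 *	}
 */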

/* TDX module vCPU states */
enum vcpu_tdx_state {
	VCPU_TD_STATE_UNINITIALIZED = 0,
	VCPU_TD_STATE_INITIALIZED,
};

struct vcpu_tdx {
	struct kvm_vcpu vcpu;
	struct vcpu_vt vt;
	u64 ext_exit_qualification;
	gpa_t exit_gpa;
	struct tdx_module_args vp_enter_args;

	struct tdx_vp vp;

	struct list_head cpu_list;

	u64 vp_enter_ret;

	enum vcpu_tdx_state state;
	bool guest_entered;

	u64 map_gpa_next;
	u64 map_gpa_end;
};
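
/*
 * As with struct kvm_tdx, the generic struct kvm_vcpu is embedded first so
 * that struct vcpu_tdx can be recovered via container_of().  map_gpa_next
 * and map_gpa_end are assumed to track the remaining range of a guest
 * MapGPA request that KVM services in chunks; see tdx.c for the
 * authoritative usage.
 */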

void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
		      u64 val, u64 err);

static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
{
	u64 err, data;

	err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
	if (unlikely(err)) {
		pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
		return 0;
	}
	return data;
}
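
/*
 * Illustrative use only (the field name below is hypothetical): cache a
 * TD-scope execution control after the TD has been configured, e.g.
 *
 *	kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
 *
 * On SEAMCALL failure the helper logs the error and returns 0, so callers
 * must tolerate a zero value.
 */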

static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
{
#define VMCS_ENC_ACCESS_TYPE_MASK	0x1UL
#define VMCS_ENC_ACCESS_TYPE_FULL	0x0UL
#define VMCS_ENC_ACCESS_TYPE_HIGH	0x1UL
#define VMCS_ENC_ACCESS_TYPE(field)	((field) & VMCS_ENC_ACCESS_TYPE_MASK)

	/* TDX is 64-bit only.  HIGH fields aren't supported. */
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
			 VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
			 "Read/Write to TD VMCS *_HIGH fields not supported");

	BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);

#define VMCS_ENC_WIDTH_MASK	GENMASK(14, 13)
#define VMCS_ENC_WIDTH_16BIT	(0UL << 13)
#define VMCS_ENC_WIDTH_64BIT	(1UL << 13)
#define VMCS_ENC_WIDTH_32BIT	(2UL << 13)
#define VMCS_ENC_WIDTH_NATURAL	(3UL << 13)
#define VMCS_ENC_WIDTH(field)	((field) & VMCS_ENC_WIDTH_MASK)

	/* TDX is 64-bit only, i.e. natural width == 64 bits. */
	BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
			 (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
			  VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
			 "Invalid TD VMCS access for 64-bit field");
	BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
			 VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
			 "Invalid TD VMCS access for 32-bit field");
	BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
			 VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
			 "Invalid TD VMCS access for 16-bit field");
}
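
/*
 * Worked example of the encoding checks above: GUEST_RIP is 0x681e, so bit 0
 * (access type) is 0 (FULL) and bits 14:13 (width) are 3 (natural width,
 * which is 64 bits since TDX is 64-bit only).  Only the 64-bit accessors
 * generated below, e.g. td_vmcs_read64(tdx, GUEST_RIP), build cleanly;
 * td_vmcs_read32(tdx, GUEST_RIP) trips the 64-bit BUILD_BUG_ON_MSG().
 */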

static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}

#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass)				\
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx,	\
							u32 field)		\
{										\
	u64 err, data;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_rd(&tdx->vp, TDVPS_##uclass(field), &data);		\
	if (unlikely(err)) {							\
		tdh_vp_rd_failed(tdx, #uclass, field, err);			\
		return 0;							\
	}									\
	return (u##bits)data;							\
}										\
static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx,	\
						      u32 field, u##bits val)	\
{										\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), val,			\
			GENMASK_ULL(bits - 1, 0));				\
	if (unlikely(err))							\
		tdh_vp_wr_failed(tdx, #uclass, " = ", field, (u64)val, err);	\
}										\
static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx,	\
						       u32 field, u64 bit)	\
{										\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), bit, bit);		\
	if (unlikely(err))							\
		tdh_vp_wr_failed(tdx, #uclass, " |= ", field, bit, err);	\
}										\
static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx,	\
							 u32 field, u64 bit)	\
{										\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(&tdx->vp, TDVPS_##uclass(field), 0, bit);		\
	if (unlikely(err))							\
		tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);	\
}

bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);

TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);

TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
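
/*
 * Each instantiation above emits read/write/setbit/clearbit helpers named
 * after the lower-case class, e.g. TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs)
 * generates td_vmcs_read32(), td_vmcs_write32(), td_vmcs_setbit32() and
 * td_vmcs_clearbit32(), all taking the raw VMCS field encoding.  Purely
 * illustrative call (real callers live in tdx.c):
 *
 *	td_vmcs_setbit32(tdx, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_NMI_EXITING);
 */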

#else
static inline int tdx_bringup(void) { return 0; }
static inline void tdx_cleanup(void) {}

#define enable_tdx	0

struct kvm_tdx {
	struct kvm kvm;
};

struct vcpu_tdx {
	struct kvm_vcpu vcpu;
};

static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }

#endif	/* CONFIG_KVM_INTEL_TDX */

#endif	/* __KVM_X86_VMX_TDX_H */