/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2023 Intel Corporation. */
#ifndef _LINUX_CXL_EVENT_H
#define _LINUX_CXL_EVENT_H

#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/workqueue_types.h>

/*
 * Common Event Record Format
 * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
 */
struct cxl_event_record_hdr {
        u8 length;
        u8 flags[3];
        __le16 handle;
        __le16 related_handle;
        __le64 timestamp;
        u8 maint_op_class;
        u8 maint_op_sub_class;
        __le16 ld_id;
        u8 head_id;
        u8 reserved[11];
} __packed;

struct cxl_event_media_hdr {
        struct cxl_event_record_hdr hdr;
        __le64 phys_addr;
        u8 descriptor;
        u8 type;
        u8 transaction_type;
        /*
         * The meaning of Validity Flags from bit 2 is
         * different across DRAM and General Media records
         */
        u8 validity_flags[2];
        u8 channel;
        u8 rank;
} __packed;
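
/*
 * Illustrative sketch, not part of this header: because the meaning of
 * validity_flags changes from bit 2 onward, a consumer should only test
 * those bits against the record type it actually holds. The helper name
 * and the "channel valid" bit position below are hypothetical; check the
 * relevant spec table (CXL 3.1 Table 8-45 vs Table 8-46) for the real
 * layout.
 *
 *      static bool gen_media_channel_is_valid(const struct cxl_event_gen_media *rec)
 *      {
 *              return rec->media_hdr.validity_flags[0] & BIT(2);
 *      }
 */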

#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
struct cxl_event_generic {
        struct cxl_event_record_hdr hdr;
        u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
} __packed;

/*
 * General Media Event Record
 * CXL rev 3.1 section 8.2.9.2.1.1; Table 8-45
 */
#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
struct cxl_event_gen_media {
        struct cxl_event_media_hdr media_hdr;
        u8 device[3];
        u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
        u8 cme_threshold_ev_flags;
        u8 cme_count[3];
        u8 sub_type;
        u8 reserved[41];
} __packed;

/*
 * DRAM Event Record - DER
 * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
 */
#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
struct cxl_event_dram {
        struct cxl_event_media_hdr media_hdr;
        u8 nibble_mask[3];
        u8 bank_group;
        u8 bank;
        u8 row[3];
        u8 column[2];
        u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
        u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
        u8 sub_channel;
        u8 cme_threshold_ev_flags;
        u8 cvme_count[3];
        u8 sub_type;
        u8 reserved;
} __packed;

/*
 * Get Health Info Record
 * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
 */
struct cxl_get_health_info {
        u8 health_status;
        u8 media_status;
        u8 add_status;
        u8 life_used;
        u8 device_temp[2];
        u8 dirty_shutdown_cnt[4];
        u8 cor_vol_err_cnt[4];
        u8 cor_per_err_cnt[4];
} __packed;

/*
 * Memory Module Event Record
 * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
 */
struct cxl_event_mem_module {
        struct cxl_event_record_hdr hdr;
        u8 event_type;
        struct cxl_get_health_info info;
        u8 validity_flags[2];
        u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
        u8 event_sub_type;
        u8 reserved[0x2a];
} __packed;

/*
 * Memory Sparing Event Record - MSER
 * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
 */
struct cxl_event_mem_sparing {
        struct cxl_event_record_hdr hdr;
        /*
         * The maintenance operation class and subclass fields defined in
         * the Memory Sparing Event Record duplicate those in the common
         * event record header, so they are treated as reserved here
         * pending correction of the spec.
         */
        u8 rsv1;
        u8 rsv2;
        u8 flags;
        u8 result;
        __le16 validity_flags;
        u8 reserved1[6];
        __le16 res_avail;
        u8 channel;
        u8 rank;
        u8 nibble_mask[3];
        u8 bank_group;
        u8 bank;
        u8 row[3];
        __le16 column;
        u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
        u8 sub_channel;
        u8 reserved2[0x25];
} __packed;

union cxl_event {
        struct cxl_event_generic generic;
        struct cxl_event_gen_media gen_media;
        struct cxl_event_dram dram;
        struct cxl_event_mem_module mem_module;
        struct cxl_event_mem_sparing mem_sparing;
        /* dram & gen_media event header */
        struct cxl_event_media_hdr media_hdr;
} __packed;
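
/*
 * Illustrative sketch, not part of this header: the media_hdr member lets
 * a consumer read the fields shared by the General Media and DRAM records
 * without first dispatching on the specific record type. The helper below
 * is hypothetical.
 *
 *      static u64 cxl_event_dpa(const union cxl_event *evt)
 *      {
 *              return le64_to_cpu(evt->media_hdr.phys_addr);
 *      }
 */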

/*
 * Common Event Record Format; in event logs
 * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
 */
struct cxl_event_record_raw {
        uuid_t id;
        union cxl_event event;
} __packed;

enum cxl_event_type {
        CXL_CPER_EVENT_GENERIC,
        CXL_CPER_EVENT_GEN_MEDIA,
        CXL_CPER_EVENT_DRAM,
        CXL_CPER_EVENT_MEM_MODULE,
        CXL_CPER_EVENT_MEM_SPARING,
};

#define CPER_CXL_DEVICE_ID_VALID BIT(0)
#define CPER_CXL_DEVICE_SN_VALID BIT(1)
#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
struct cxl_cper_event_rec {
        struct {
                u32 length;
                u64 validation_bits;
                struct cper_cxl_event_devid {
                        u16 vendor_id;
                        u16 device_id;
                        u8 func_num;
                        u8 device_num;
                        u8 bus_num;
                        u16 segment_num;
                        u16 slot_num; /* bits 2:0 reserved */
                        u8 reserved;
                } __packed device_id;
                struct cper_cxl_event_sn {
                        u32 lower_dw;
                        u32 upper_dw;
                } __packed dev_serial_num;
        } __packed hdr;

        union cxl_event event;
} __packed;

struct cxl_cper_work_data {
        enum cxl_event_type event_type;
        struct cxl_cper_event_rec rec;
};
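
/*
 * Illustrative sketch, not part of this header: a consumer draining queued
 * work data would typically switch on event_type to pick the matching
 * member of rec.event. The handle_*() callees are hypothetical.
 *
 *      static void cxl_cper_handle(struct cxl_cper_work_data *wd)
 *      {
 *              switch (wd->event_type) {
 *              case CXL_CPER_EVENT_GEN_MEDIA:
 *                      handle_gen_media(&wd->rec.event.gen_media);
 *                      break;
 *              case CXL_CPER_EVENT_DRAM:
 *                      handle_dram(&wd->rec.event.dram);
 *                      break;
 *              case CXL_CPER_EVENT_MEM_MODULE:
 *                      handle_mem_module(&wd->rec.event.mem_module);
 *                      break;
 *              default:
 *                      handle_generic(&wd->rec.event.generic);
 *                      break;
 *              }
 *      }
 */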

#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)

/*
 * The layout and values of this enumeration match the CXL Agent Type
 * field in UEFI 2.10 section N.2.13.
 */
enum {
        RCD,    /* Restricted CXL Device */
        RCH_DP, /* Restricted CXL Host Downstream Port */
        DEVICE, /* CXL Device */
        LD,     /* CXL Logical Device */
        FMLD,   /* CXL Fabric Manager managed Logical Device */
        RP,     /* CXL Root Port */
        DSP,    /* CXL Downstream Switch Port */
        USP,    /* CXL Upstream Switch Port */
};

#pragma pack(1)

/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
struct cxl_cper_sec_prot_err {
        u64 valid_bits;
        u8 agent_type;
        u8 reserved[7];

        /*
         * Except for the RCH Downstream Port, all remaining CXL agent
         * types are uniquely identified by the PCIe-compatible SBDF number.
         */
        union {
                u64 rcrb_base_addr;
                struct {
                        u8 function;
                        u8 device;
                        u8 bus;
                        u16 segment;
                        u8 reserved_1[3];
                };
        } agent_addr;

        struct {
                u16 vendor_id;
                u16 device_id;
                u16 subsystem_vendor_id;
                u16 subsystem_id;
                u8 class_code[2];
                u16 slot;
                u8 reserved_1[4];
        } device_id;

        struct {
                u32 lower_dw;
                u32 upper_dw;
        } dev_serial_num;

        u8 capability[60];
        u16 dvsec_len;
        u16 err_len;
        u8 reserved_2[4];
};
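
/*
 * Illustrative sketch, not part of this header: agent_addr is interpreted
 * according to agent_type. Only an RCH Downstream Port reports an RCRB
 * base address; every other agent type reports a PCIe SBDF. The helper
 * below is hypothetical.
 *
 *      static void cxl_prot_err_print_agent(const struct cxl_cper_sec_prot_err *p)
 *      {
 *              if (!(p->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS))
 *                      return;
 *
 *              if (p->agent_type == RCH_DP)
 *                      pr_info("RCRB base: %#llx\n",
 *                              p->agent_addr.rcrb_base_addr);
 *              else
 *                      pr_info("agent: %04x:%02x:%02x.%x\n",
 *                              p->agent_addr.segment, p->agent_addr.bus,
 *                              p->agent_addr.device, p->agent_addr.function);
 *      }
 */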

#pragma pack()

/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
struct cxl_ras_capability_regs {
        u32 uncor_status;
        u32 uncor_mask;
        u32 uncor_severity;
        u32 cor_status;
        u32 cor_mask;
        u32 cap_control;
        u32 header_log[16];
};

struct cxl_cper_prot_err_work_data {
        struct cxl_cper_sec_prot_err prot_err;
        struct cxl_ras_capability_regs ras_cap;
        int severity;
};

#ifdef CONFIG_ACPI_APEI_GHES
int cxl_cper_register_work(struct work_struct *work);
int cxl_cper_unregister_work(struct work_struct *work);
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
int cxl_cper_register_prot_err_work(struct work_struct *work);
int cxl_cper_unregister_prot_err_work(struct work_struct *work);
int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd);
#else
static inline int cxl_cper_register_work(struct work_struct *work)
{
        return 0;
}

static inline int cxl_cper_unregister_work(struct work_struct *work)
{
        return 0;
}
static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
        return 0;
}
static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
{
        return 0;
}
static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
{
        return 0;
}
static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
{
        return 0;
}
#endif
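
/*
 * Illustrative sketch, not part of this header: a consumer hands a
 * work_struct to cxl_cper_register_work() and drains the kfifo from the
 * work handler. process_event() stands in for whatever the consumer does
 * with each record.
 *
 *      static void cper_work_fn(struct work_struct *work)
 *      {
 *              struct cxl_cper_work_data wd;
 *
 *              while (cxl_cper_kfifo_get(&wd))
 *                      process_event(&wd);
 *      }
 *      static DECLARE_WORK(cper_work, cper_work_fn);
 *
 * Register during setup and unregister during teardown:
 *
 *      rc = cxl_cper_register_work(&cper_work);
 *      ...
 *      cxl_cper_unregister_work(&cper_work);
 */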

#endif /* _LINUX_CXL_EVENT_H */