// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extended Error Log driver
 *
 * Copyright (C) 2013 Intel Corp.
 * Author: Chen, Gong <gong.chen@intel.com>
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <cxl/event.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "apei/apei-internal.h"
#include <ras/ras_event.h>

#define EXT_ELOG_ENTRY_MASK	GENMASK_ULL(51, 0) /* elog entry address mask */

#define EXTLOG_DSM_REV		0x0
#define EXTLOG_FN_ADDR		0x1

#define FLAG_OS_OPTIN		BIT(0)
#define ELOG_ENTRY_VALID	(1ULL<<63)
#define ELOG_ENTRY_LEN		0x1000

#define EMCA_BUG \
	"Can not request iomem region <0x%016llx-0x%016llx> - eMCA disabled\n"

struct extlog_l1_head {
	u32 ver;	/* Header Version */
	u32 hdr_len;	/* Header Length */
	u64 total_len;	/* entire L1 Directory length including this header */
	u64 elog_base;	/* MCA Error Log Directory base address */
	u64 elog_len;	/* MCA Error Log Directory length */
	u32 flags;	/* bit 0 - OS/VMM Opt-in */
	u8 rev0[12];
	u32 entries;	/* Valid L1 Directory entries per logical processor */
	u8 rev1[12];
};
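
/*
 * Layout, as implied by the structure above and the lookup macros below:
 * the L1 Directory begins with this header and is followed by one 64-bit
 * entry per MCA bank per logical processor.  A valid entry carries the
 * physical address of an ACPI generic error status block located inside
 * the MCA Error Log (elog) area described by elog_base/elog_len.
 */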

static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295";

/* L1 table related physical address */
static u64 elog_base;
static size_t elog_size;
static u64 l1_dirbase;
static size_t l1_size;

/* L1 table related virtual address */
static void __iomem *extlog_l1_addr;
static void __iomem *elog_addr;

static void *elog_buf;

static u64 *l1_entry_base;
static u32 l1_percpu_entry;

#define ELOG_IDX(cpu, bank)		\
	(cpu_physical_id(cpu) * l1_percpu_entry + (bank))

#define ELOG_ENTRY_DATA(idx)		\
	(*(l1_entry_base + (idx)))

#define ELOG_ENTRY_ADDR(phyaddr)	\
	(phyaddr - elog_base + (u8 *)elog_addr)

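/*
 * Look up the L1 Directory entry for (cpu, bank) and, if its valid bit is
 * set, return the mapped ACPI generic error status block it points to.
 * Returns NULL when the entry is not valid or the block is empty.
 */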
static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
{
	int idx;
	u64 data;
	struct acpi_hest_generic_status *estatus;

	WARN_ON(cpu < 0);
	idx = ELOG_IDX(cpu, bank);
	data = ELOG_ENTRY_DATA(idx);
	if ((data & ELOG_ENTRY_VALID) == 0)
		return NULL;

	data &= EXT_ELOG_ENTRY_MASK;
	estatus = (struct acpi_hest_generic_status *)ELOG_ENTRY_ADDR(data);

	/* if no valid data in elog entry, just return */
	if (estatus->block_status == 0)
		return NULL;

	return estatus;
}

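/*
 * Print one error status block to the console.  A monotonically increasing
 * sequence number is folded into the printk prefix so that the lines of one
 * record can be correlated; severity selects KERN_INFO vs KERN_ERR when the
 * caller supplies no prefix.
 */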
static void __print_extlog_rcd(const char *pfx,
			       struct acpi_hest_generic_status *estatus, int cpu)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (!pfx) {
		if (estatus->error_severity <= CPER_SEV_CORRECTED)
			pfx = KERN_INFO;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}", pfx, curr_seqno);
	printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
	cper_estatus_print(pfx_seq, estatus);
}

static int print_extlog_rcd(const char *pfx,
			    struct acpi_hest_generic_status *estatus, int cpu)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (estatus->error_severity == CPER_SEV_CORRECTED ||
	    (estatus->error_severity == CPER_SEV_INFORMATIONAL))
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__print_extlog_rcd(pfx, estatus, cpu);
		return 0;
	}

	return 1;
}

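/*
 * For a CPER PCIe error section that carries both a device ID and AER info,
 * look up the pci_dev and feed the recorded AER registers to the AER
 * reporting path.  Compiled out when APEI PCIe AER reporting is disabled.
 */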
static void extlog_print_pcie(struct cper_sec_pcie *pcie_err,
			      int severity)
{
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct aer_capability_regs *aer;
	struct pci_dev *pdev;
	unsigned int devfn;
	unsigned int bus;
	int aer_severity;
	int domain;

	if (!(pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
	      pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO))
		return;

	aer_severity = cper_severity_to_aer(severity);
	aer = (struct aer_capability_regs *)pcie_err->aer_info;
	domain = pcie_err->device_id.segment;
	bus = pcie_err->device_id.bus;
	devfn = PCI_DEVFN(pcie_err->device_id.device,
			  pcie_err->device_id.function);
	pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
	if (!pdev)
		return;

	pci_print_aer(pdev, aer_severity, aer);
	pci_dev_put(pdev);
#endif
}

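/*
 * Hand a CPER CXL protocol error section off to the CXL subsystem: validate
 * the section, package it together with its severity into a work data item,
 * and let the CXL core process it.
 */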
static void
extlog_cxl_cper_handle_prot_err(struct cxl_cper_sec_prot_err *prot_err,
				int severity)
{
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct cxl_cper_prot_err_work_data wd;

	if (!cxl_cper_sec_prot_err_valid(prot_err))
		return;

	if (cxl_cper_setup_prot_err_work_data(&wd, prot_err, severity))
		return;

	cxl_cper_handle_prot_err(&wd);
#endif
}

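/*
 * MCE decode-chain callback.  For the reporting CPU and bank, fetch the
 * firmware-written error status block from the extended log, copy it into
 * elog_buf, and clear block_status so the BIOS can reuse the entry.  The
 * copy is then either printed to the console or, when userspace RAS
 * consumers are present, dispatched per section: memory errors go to the
 * extlog_mem_event tracepoint, CXL protocol and PCIe sections to their
 * respective handlers, and anything else to the non-standard event log.
 */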
static int extlog_print(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct mce *mce = (struct mce *)data;
	int bank = mce->bank;
	int cpu = mce->extcpu;
	struct acpi_hest_generic_status *estatus, *tmp;
	struct acpi_hest_generic_data *gdata;
	const guid_t *fru_id;
	char *fru_text;
	guid_t *sec_type;
	static u32 err_seq;

	estatus = extlog_elog_entry_check(cpu, bank);
	if (!estatus)
		return NOTIFY_DONE;

	if (mce->kflags & MCE_HANDLED_CEC) {
		estatus->block_status = 0;
		return NOTIFY_DONE;
	}

	memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
	/* clear record status to enable BIOS to update it again */
	estatus->block_status = 0;

	tmp = (struct acpi_hest_generic_status *)elog_buf;

	if (!ras_userspace_consumers()) {
		print_extlog_rcd(NULL, tmp, cpu);
		goto out;
	}

	/* log event via trace */
	err_seq++;
	apei_estatus_for_each_section(tmp, gdata) {
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;
		else
			fru_id = &guid_null;
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;
		else
			fru_text = "";
		sec_type = (guid_t *)gdata->section_type;
		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem = acpi_hest_get_payload(gdata);

			if (gdata->error_data_length >= sizeof(*mem))
				trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
						       (u8)gdata->error_severity);
		} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
			struct cxl_cper_sec_prot_err *prot_err =
				acpi_hest_get_payload(gdata);

			extlog_cxl_cper_handle_prot_err(prot_err,
							gdata->error_severity);
		} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

			extlog_print_pcie(pcie_err, gdata->error_severity);
		} else {
			void *err = acpi_hest_get_payload(gdata);

			log_non_standard_event(sec_type, fru_id, fru_text,
					       gdata->error_severity, err,
					       gdata->error_data_length);
		}
	}

out:
	mce->kflags |= MCE_HANDLED_EXTLOG;
	return NOTIFY_OK;
}

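/*
 * Evaluate the extended-error-log _DSM under \_SB to obtain the physical
 * base address of the L1 Directory.  As the in-function comment notes, the
 * spec requires the directory to be 4K aligned; an unaligned address is
 * treated as a firmware bug and the driver bails out.
 */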
static bool __init extlog_get_l1addr(void)
{
	guid_t guid;
	acpi_handle handle;
	union acpi_object *obj;

	if (guid_parse(extlog_dsm_uuid, &guid))
		return false;
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return false;
	if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
		return false;
	obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
				      EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
	if (!obj) {
		return false;
	} else {
		l1_dirbase = obj->integer.value;
		ACPI_FREE(obj);
	}

	/* Spec says L1 directory must be 4K aligned, bail out if it isn't */
	if (l1_dirbase & ((1 << 12) - 1)) {
		pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n",
			l1_dirbase);
		return false;
	}

	return true;
}

static struct notifier_block extlog_mce_dec = {
	.notifier_call = extlog_print,
	.priority = MCE_PRIO_EXTLOG,
};

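/*
 * Probe sequence: check MCG_CAP for extended-log support (MCG_ELOG_P) and
 * locate the L1 Directory via the _DSM, map the directory header to learn
 * the table and elog sizes, then map the full L1 table and the elog area,
 * allocate a bounce buffer for one record, register with the MCE decode
 * chain, and finally set the OS opt-in flag to take over from the BIOS.
 */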
static int __init extlog_init(void)
{
	struct extlog_l1_head *l1_head;
	void __iomem *extlog_l1_hdr;
	size_t l1_hdr_size;
	struct resource *r;
	u64 cap;
	int rc;

	if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
	    !(cap & MCG_ELOG_P) ||
	    !extlog_get_l1addr())
		return -ENODEV;

	rc = -EINVAL;
	/* get L1 header to fetch necessary information */
	l1_hdr_size = sizeof(struct extlog_l1_head);
	r = request_mem_region(l1_dirbase, l1_hdr_size, "L1 DIR HDR");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)l1_dirbase,
			(unsigned long long)l1_dirbase + l1_hdr_size);
		goto err;
	}

	extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
	if (!extlog_l1_hdr) {
		rc = -ENOMEM;
		goto err_release_l1_hdr;
	}
	l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
	l1_size = l1_head->total_len;
	l1_percpu_entry = l1_head->entries;
	elog_base = l1_head->elog_base;
	elog_size = l1_head->elog_len;
	acpi_os_unmap_iomem(extlog_l1_hdr, l1_hdr_size);
	release_mem_region(l1_dirbase, l1_hdr_size);

	/* remap L1 header again based on completed information */
	r = request_mem_region(l1_dirbase, l1_size, "L1 Table");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)l1_dirbase,
			(unsigned long long)l1_dirbase + l1_size);
		goto err;
	}
	extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
	if (!extlog_l1_addr) {
		rc = -ENOMEM;
		goto err_release_l1_dir;
	}
	l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);

	/* remap elog table */
	r = request_mem_region(elog_base, elog_size, "Elog Table");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)elog_base,
			(unsigned long long)elog_base + elog_size);
		goto err_release_l1_dir;
	}
	elog_addr = acpi_os_map_iomem(elog_base, elog_size);
	if (!elog_addr) {
		rc = -ENOMEM;
		goto err_release_elog;
	}

	rc = -ENOMEM;
	/* allocate buffer to save elog record */
	elog_buf = kmalloc(ELOG_ENTRY_LEN, GFP_KERNEL);
	if (elog_buf == NULL)
		goto err_release_elog;

	mce_register_decode_chain(&extlog_mce_dec);
	/* enable OS to be involved to take over management from BIOS */
	((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;

	return 0;

err_release_elog:
	if (elog_addr)
		acpi_os_unmap_iomem(elog_addr, elog_size);
	release_mem_region(elog_base, elog_size);
err_release_l1_dir:
	if (extlog_l1_addr)
		acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
	release_mem_region(l1_dirbase, l1_size);
err_release_l1_hdr:
	release_mem_region(l1_dirbase, l1_hdr_size);
err:
	pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
	return rc;
}

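/*
 * Teardown mirrors extlog_init(): unregister from the MCE decode chain,
 * clear the OS opt-in flag so firmware resumes ownership, then unmap and
 * release the elog and L1 regions and free the record buffer.
 */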
static void __exit extlog_exit(void)
{
	mce_unregister_decode_chain(&extlog_mce_dec);
	if (extlog_l1_addr) {
		((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
		acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
	}
	if (elog_addr)
		acpi_os_unmap_iomem(elog_addr, elog_size);
	release_mem_region(elog_base, elog_size);
	release_mem_region(l1_dirbase, l1_size);
	kfree(elog_buf);
}

module_init(extlog_init);
module_exit(extlog_exit);

MODULE_AUTHOR("Chen, Gong <gong.chen@intel.com>");
MODULE_DESCRIPTION("Extended MCA Error Log Driver");
MODULE_LICENSE("GPL");