/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
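 * For example, a minimal userspace sketch of this binary path (the struct
 * definition must be copied from this driver's amdgpu_ras.h, since it is
 * not an exported UAPI header; the card index and the lack of error
 * handling here are illustrative only):
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct ras_debug_if data;
 *	int fd;
 *
 *	memset(&data, 0, sizeof(data));
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;	/* 0: disable, 1: enable, 2: inject */
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 *	close(fd);
 *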
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 * See ras_block_string[] for details.
 *
 * The error type is one of: ue, ce and poison, where
 * ue is multi-uncorrectable,
 * ce is single-correctable and
 * poison is poison.
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers; the leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
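 *
 * For example (the card index and block name are illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/umc_err_count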
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
					"already been marked as bad!\n",
					data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
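 * The example output above would come from a read such as (the card index
 * and block name are illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *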
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exists. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * An IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      const char *blk_name,
					      bool is_ue)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ce_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new correctable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ce_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld correctable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}

}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;

			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
	}
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		/* FIXME: add code to check return value later */
		amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
		amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
	}

	return 0;
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_mca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	    hive_ras_recovery) &&
	    mca_funcs && mca_funcs->mca_set_debug_mode)
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success or if there is nothing to do; otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear;
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for a
 * specific ip block; if @query_info is NULL, then the query request is for
 * all the ip blocks that support querying ras error counters/status
 *
 * If @ce_count or @ue_count is set, count and return the corresponding
 * error counts through those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret = 0;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support the ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query a specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and cannot be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. This gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
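 * The output above would come from a read such as (the card index is
 * illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *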
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);
	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);
	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		"%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
			.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/* sysfs end */
1667
1668 /**
1669 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1670 *
1671 * Normally when there is an uncorrectable error, the driver will reset
1672 * the GPU to recover. However, in the event of an unrecoverable error,
1673 * the driver provides an interface to reboot the system automatically
1674 * in that event.
1675 *
1676 * The following file in debugfs provides that interface:
1677 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1678 *
1679 * Usage:
1680 *
1681 * .. code-block:: bash
1682 *
1683 * echo true > .../ras/auto_reboot
1684 *
1685 */
1686 /* debugfs begin */
amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device * adev)1687 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1688 {
1689 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1690 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1691 struct drm_minor *minor = adev_to_drm(adev)->primary;
1692 struct dentry *dir;
1693
1694 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1695 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1696 &amdgpu_ras_debugfs_ctrl_ops);
1697 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1698 &amdgpu_ras_debugfs_eeprom_ops);
1699 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1700 &con->bad_page_cnt_threshold);
1701 debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1702 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1703 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1704 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1705 &amdgpu_ras_debugfs_eeprom_size_ops);
1706 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1707 S_IRUGO, dir, adev,
1708 &amdgpu_ras_debugfs_eeprom_table_ops);
1709 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1710
1711 /*
1712 * After one uncorrectable error happens, usually GPU recovery will
1713 * be scheduled. But due to the known problem in GPU recovery failing
1714 * to bring GPU back, below interface provides one direct way to
1715 * user to reboot system automatically in such case within
1716 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
1717 * will never be called.
1718 */
1719 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1720
1721 /*
1722 * User could set this not to clean up hardware's error count register
1723 * of RAS IPs during ras recovery.
1724 */
1725 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1726 &con->disable_ras_err_cnt_harvest);
1727 return dir;
1728 }
1729
amdgpu_ras_debugfs_create(struct amdgpu_device * adev,struct ras_fs_if * head,struct dentry * dir)1730 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1731 struct ras_fs_if *head,
1732 struct dentry *dir)
1733 {
1734 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1735
1736 if (!obj || !dir)
1737 return;
1738
1739 get_obj(obj);
1740
1741 memcpy(obj->fs_data.debugfs_name,
1742 head->debugfs_name,
1743 sizeof(obj->fs_data.debugfs_name));
1744
1745 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1746 obj, &amdgpu_ras_debugfs_ops);
1747 }
1748
1749 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1750 {
1751 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1752 struct dentry *dir;
1753 struct ras_manager *obj;
1754 struct ras_fs_if fs_info;
1755
1756 	/*
1757 	 * it won't be called in the resume path, so there is no need to
1758 	 * check suspend and gpu reset status
1759 	 */
1760 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1761 return;
1762
1763 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1764
1765 list_for_each_entry(obj, &con->head, node) {
1766 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1767 (obj->attr_inuse == 1)) {
1768 sprintf(fs_info.debugfs_name, "%s_err_inject",
1769 get_ras_block_str(&obj->head));
1770 fs_info.head = obj->head;
1771 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1772 }
1773 }
1774
1775 amdgpu_mca_smu_debugfs_init(adev, dir);
1776 }
1777
1778 /* debugfs end */
1779
1780 /* ras fs */
1781 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1782 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1783 static DEVICE_ATTR(features, S_IRUGO,
1784 amdgpu_ras_sysfs_features_read, NULL);
1785 static DEVICE_ATTR(version, 0444,
1786 amdgpu_ras_sysfs_version_show, NULL);
1787 static DEVICE_ATTR(schema, 0444,
1788 amdgpu_ras_sysfs_schema_show, NULL);
1789 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1790 {
1791 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1792 struct attribute_group group = {
1793 .name = RAS_FS_NAME,
1794 };
1795 struct attribute *attrs[] = {
1796 &con->features_attr.attr,
1797 &con->version_attr.attr,
1798 &con->schema_attr.attr,
1799 NULL
1800 };
1801 struct bin_attribute *bin_attrs[] = {
1802 NULL,
1803 NULL,
1804 };
1805 int r;
1806
1807 group.attrs = attrs;
1808
1809 /* add features entry */
1810 con->features_attr = dev_attr_features;
1811 sysfs_attr_init(attrs[0]);
1812
1813 /* add version entry */
1814 con->version_attr = dev_attr_version;
1815 sysfs_attr_init(attrs[1]);
1816
1817 /* add schema entry */
1818 con->schema_attr = dev_attr_schema;
1819 sysfs_attr_init(attrs[2]);
1820
1821 if (amdgpu_bad_page_threshold != 0) {
1822 /* add bad_page_features entry */
1823 bin_attr_gpu_vram_bad_pages.private = NULL;
1824 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1825 bin_attrs[0] = &con->badpages_attr;
1826 group.bin_attrs = bin_attrs;
1827 sysfs_bin_attr_init(bin_attrs[0]);
1828 }
1829
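	/* A failure to create the sysfs group is reported but not
	 * propagated: the function still returns 0, so RAS init can
	 * continue without these attributes.
	 */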
1830 r = sysfs_create_group(&adev->dev->kobj, &group);
1831 if (r)
1832 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1833
1834 return 0;
1835 }
1836
1837 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1838 {
1839 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1840 struct ras_manager *con_obj, *ip_obj, *tmp;
1841
1842 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1843 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1844 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1845 if (ip_obj)
1846 put_obj(ip_obj);
1847 }
1848 }
1849
1850 amdgpu_ras_sysfs_remove_all(adev);
1851 return 0;
1852 }
1853 /* ras fs end */
1854
1855 /* ih begin */
1856
1857 /* For the hardware that cannot enable the bif ring for both the
1858  * ras_controller_irq and the ras_err_event_athub_irq ih cookies, the
1859  * driver has to poll the status register to check whether the interrupt
1860  * has triggered, and properly ack the interrupt if it is there
1861  */
1862 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1863 {
1864 /* Fatal error events are handled on host side */
1865 if (amdgpu_sriov_vf(adev))
1866 return;
1867
1868 if (adev->nbio.ras &&
1869 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1870 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1871
1872 if (adev->nbio.ras &&
1873 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1874 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1875 }
1876
1877 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1878 struct amdgpu_iv_entry *entry)
1879 {
1880 bool poison_stat = false;
1881 struct amdgpu_device *adev = obj->adev;
1882 struct amdgpu_ras_block_object *block_obj =
1883 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1884
1885 if (!block_obj)
1886 return;
1887
1888 	/* both query_poison_status and handle_poison_consumption are optional,
1889 	 * but at least one of them should be implemented if we need a poison
1890 	 * consumption handler
1891 	 */
1892 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1893 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1894 if (!poison_stat) {
1895 			/* Not a poison consumption interrupt, no need to handle it */
1896 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1897 block_obj->ras_comm.name);
1898
1899 return;
1900 }
1901 }
1902
1903 amdgpu_umc_poison_handler(adev, false);
1904
1905 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1906 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1907
1908 /* gpu reset is fallback for failed and default cases */
1909 if (poison_stat) {
1910 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1911 block_obj->ras_comm.name);
1912 amdgpu_ras_reset_gpu(adev);
1913 } else {
1914 amdgpu_gfx_poison_consumption_handler(adev, entry);
1915 }
1916 }
1917
1918 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1919 struct amdgpu_iv_entry *entry)
1920 {
1921 dev_info(obj->adev->dev,
1922 "Poison is created\n");
1923 }
1924
1925 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1926 struct amdgpu_iv_entry *entry)
1927 {
1928 struct ras_ih_data *data = &obj->ih_data;
1929 struct ras_err_data err_data;
1930 int ret;
1931
1932 if (!data->cb)
1933 return;
1934
1935 ret = amdgpu_ras_error_data_init(&err_data);
1936 if (ret)
1937 return;
1938
1939 	/* Let the IP handle its data; we may need the output from the
1940 	 * callback to update the error type/count, etc.
1941 	 */
1942 ret = data->cb(obj->adev, &err_data, entry);
1943 	/* A UE will trigger an interrupt, and in that case
1944 	 * we need to do a reset to recover the whole system.
1945 	 * But we leave the IP to do that recovery; here we just dispatch
1946 	 * the error.
1947 	 */
1948 if (ret == AMDGPU_RAS_SUCCESS) {
1949 		/* these counts may be left as 0 if
1950 		 * a block does not report error counts
1951 		 */
1952 obj->err_data.ue_count += err_data.ue_count;
1953 obj->err_data.ce_count += err_data.ce_count;
1954 }
1955
1956 amdgpu_ras_error_data_fini(&err_data);
1957 }
1958
1959 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1960 {
1961 struct ras_ih_data *data = &obj->ih_data;
1962 struct amdgpu_iv_entry entry;
1963
1964 while (data->rptr != data->wptr) {
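		/* Pairs with the wmb() on the producer side in
		 * amdgpu_ras_interrupt_dispatch(): only read the entry
		 * after the wptr update has been observed.
		 */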
1965 rmb();
1966 memcpy(&entry, &data->ring[data->rptr],
1967 data->element_size);
1968
1969 wmb();
1970 data->rptr = (data->aligned_element_size +
1971 data->rptr) % data->ring_size;
1972
1973 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1974 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1975 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1976 else
1977 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1978 } else {
1979 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1980 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1981 else
1982 dev_warn(obj->adev->dev,
1983 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1984 }
1985 }
1986 }
1987
1988 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1989 {
1990 struct ras_ih_data *data =
1991 container_of(work, struct ras_ih_data, ih_work);
1992 struct ras_manager *obj =
1993 container_of(data, struct ras_manager, ih_data);
1994
1995 amdgpu_ras_interrupt_handler(obj);
1996 }
1997
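/* Producer side of the software IH ring: copy the IV entry into the
 * per-object ring and kick the bottom-half worker.  A minimal caller
 * sketch from an IP block's interrupt path (illustrative only; the
 * ras_if and entry names are assumed, and the real call sites live in
 * the IP-specific code):
 *
 *	struct ras_dispatch_if ih_data = {
 *		.head = *ras_if,
 *	};
 *	ih_data.entry = entry;
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 */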
1998 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1999 struct ras_dispatch_if *info)
2000 {
2001 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
2002 	struct ras_ih_data *data;
2003 
2004 	if (!obj)
2005 		return -EINVAL;
2006 	data = &obj->ih_data;
2007 	if (data->inuse == 0)
2008 return 0;
2009
2010 	/* The ring might overflow, i.e. overwrite entries not yet consumed. */
2011 memcpy(&data->ring[data->wptr], info->entry,
2012 data->element_size);
2013
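	/* Make the copied entry visible before publishing the new wptr,
	 * so the consumer never reads a partially written slot.
	 */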
2014 wmb();
2015 data->wptr = (data->aligned_element_size +
2016 data->wptr) % data->ring_size;
2017
2018 schedule_work(&data->ih_work);
2019
2020 return 0;
2021 }
2022
2023 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2024 struct ras_common_if *head)
2025 {
2026 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2027 struct ras_ih_data *data;
2028
2029 if (!obj)
2030 return -EINVAL;
2031
2032 data = &obj->ih_data;
2033 if (data->inuse == 0)
2034 return 0;
2035
2036 cancel_work_sync(&data->ih_work);
2037
2038 kfree(data->ring);
2039 memset(data, 0, sizeof(*data));
2040 put_obj(obj);
2041
2042 return 0;
2043 }
2044
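/* Allocate the per-block software IH ring (64 entries, element size
 * aligned to 8 bytes) and hook up the block's ras_cb; paired with
 * amdgpu_ras_interrupt_remove_handler(), which frees the ring.
 */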
2045 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2046 struct ras_common_if *head)
2047 {
2048 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2049 struct ras_ih_data *data;
2050 struct amdgpu_ras_block_object *ras_obj;
2051
2052 if (!obj) {
2053 		/* in case we register the IH before enabling the ras feature */
2054 obj = amdgpu_ras_create_obj(adev, head);
2055 if (!obj)
2056 return -EINVAL;
2057 } else
2058 get_obj(obj);
2059
2060 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2061
2062 data = &obj->ih_data;
2063 	/* add the callback, etc. */
2064 *data = (struct ras_ih_data) {
2065 .inuse = 0,
2066 .cb = ras_obj->ras_cb,
2067 .element_size = sizeof(struct amdgpu_iv_entry),
2068 .rptr = 0,
2069 .wptr = 0,
2070 };
2071
2072 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2073
2074 data->aligned_element_size = ALIGN(data->element_size, 8);
2075 /* the ring can store 64 iv entries. */
2076 data->ring_size = 64 * data->aligned_element_size;
2077 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2078 if (!data->ring) {
2079 put_obj(obj);
2080 return -ENOMEM;
2081 }
2082
2083 /* IH is ready */
2084 data->inuse = 1;
2085
2086 return 0;
2087 }
2088
2089 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2090 {
2091 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2092 struct ras_manager *obj, *tmp;
2093
2094 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2095 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2096 }
2097
2098 return 0;
2099 }
2100 /* ih end */
2101
2102 /* traverse all IPs except NBIO to query error counters */
2103 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
2104 {
2105 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2106 struct ras_manager *obj;
2107
2108 if (!adev->ras_enabled || !con)
2109 return;
2110
2111 list_for_each_entry(obj, &con->head, node) {
2112 struct ras_query_if info = {
2113 .head = obj->head,
2114 };
2115
2116 		/*
2117 		 * The PCIE_BIF IP has a separate isr for the ras controller
2118 		 * interrupt, and the block-specific ras counter query is done
2119 		 * in that isr. So skip this block in the common sync flood
2120 		 * interrupt isr path.
2121 		 */
2122 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2123 continue;
2124
2125 		/*
2126 		 * this is a workaround for aldebaran: skip sending the msg to
2127 		 * smu to get the ecc_info table, because the smu currently
2128 		 * fails to handle that request.
2129 		 * it should be removed once the smu handles the ecc_info table.
2130 		 */
2131 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2132 (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2133 IP_VERSION(13, 0, 2)))
2134 continue;
2135
2136 amdgpu_ras_query_error_status(adev, &info);
2137
2138 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2139 IP_VERSION(11, 0, 2) &&
2140 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2141 IP_VERSION(11, 0, 4) &&
2142 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2143 IP_VERSION(13, 0, 0)) {
2144 if (amdgpu_ras_reset_error_status(adev, info.head.block))
2145 dev_warn(adev->dev, "Failed to reset error counter and error status");
2146 }
2147 }
2148 }
2149
2150 /* Parse RdRspStatus and WrRspStatus */
2151 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2152 struct ras_query_if *info)
2153 {
2154 struct amdgpu_ras_block_object *block_obj;
2155 	/*
2156 	 * Only two blocks need to query the read/write
2157 	 * RspStatus at the current state
2158 	 */
2159 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2160 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2161 return;
2162
2163 block_obj = amdgpu_ras_get_ras_block(adev,
2164 info->head.block,
2165 info->head.sub_block_index);
2166
2167 if (!block_obj || !block_obj->hw_ops) {
2168 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2169 get_ras_block_str(&info->head));
2170 return;
2171 }
2172
2173 if (block_obj->hw_ops->query_ras_error_status)
2174 block_obj->hw_ops->query_ras_error_status(adev);
2175
2176 }
2177
2178 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2179 {
2180 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2181 struct ras_manager *obj;
2182
2183 if (!adev->ras_enabled || !con)
2184 return;
2185
2186 list_for_each_entry(obj, &con->head, node) {
2187 struct ras_query_if info = {
2188 .head = obj->head,
2189 };
2190
2191 amdgpu_ras_error_status_query(adev, &info);
2192 }
2193 }
2194
2195 /* recovery begin */
2196
2197 /* return 0 on success.
2198  * the caller needs to free bps.
2199 */
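/* The returned table is presumably consumed by the gpu_vram_bad_pages
 * binary sysfs attribute; each entry carries the page address, the page
 * size and one of the AMDGPU_RAS_RETIRE_PAGE_* states queried from the
 * VRAM manager.
 */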
2200 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2201 struct ras_badpage **bps, unsigned int *count)
2202 {
2203 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2204 struct ras_err_handler_data *data;
2205 int i = 0;
2206 int ret = 0, status;
2207
2208 if (!con || !con->eh_data || !bps || !count)
2209 return -EINVAL;
2210
2211 mutex_lock(&con->recovery_lock);
2212 data = con->eh_data;
2213 if (!data || data->count == 0) {
2214 *bps = NULL;
2215 ret = -EINVAL;
2216 goto out;
2217 }
2218
2219 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2220 if (!*bps) {
2221 ret = -ENOMEM;
2222 goto out;
2223 }
2224
2225 for (; i < data->count; i++) {
2226 (*bps)[i] = (struct ras_badpage){
2227 .bp = data->bps[i].retired_page,
2228 .size = AMDGPU_GPU_PAGE_SIZE,
2229 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2230 };
2231 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2232 data->bps[i].retired_page);
2233 if (status == -EBUSY)
2234 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2235 else if (status == -ENOENT)
2236 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2237 }
2238
2239 *count = data->count;
2240 out:
2241 mutex_unlock(&con->recovery_lock);
2242 return ret;
2243 }
2244
2245 static void amdgpu_ras_do_recovery(struct work_struct *work)
2246 {
2247 struct amdgpu_ras *ras =
2248 container_of(work, struct amdgpu_ras, recovery_work);
2249 struct amdgpu_device *remote_adev = NULL;
2250 struct amdgpu_device *adev = ras->adev;
2251 struct list_head device_list, *device_list_handle = NULL;
2252 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2253
2254 if (hive)
2255 atomic_set(&hive->ras_recovery, 1);
2256 if (!ras->disable_ras_err_cnt_harvest) {
2257
2258 /* Build list of devices to query RAS related errors */
2259 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2260 device_list_handle = &hive->device_list;
2261 } else {
2262 INIT_LIST_HEAD(&device_list);
2263 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2264 device_list_handle = &device_list;
2265 }
2266
2267 list_for_each_entry(remote_adev,
2268 device_list_handle, gmc.xgmi.head) {
2269 amdgpu_ras_query_err_status(remote_adev);
2270 amdgpu_ras_log_on_err_counter(remote_adev);
2271 }
2272
2273 }
2274
2275 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2276 struct amdgpu_reset_context reset_context;
2277 memset(&reset_context, 0, sizeof(reset_context));
2278
2279 reset_context.method = AMD_RESET_METHOD_NONE;
2280 reset_context.reset_req_dev = adev;
2281
2282 /* Perform full reset in fatal error mode */
2283 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2284 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2285 else {
2286 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2287
2288 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2289 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2290 reset_context.method = AMD_RESET_METHOD_MODE2;
2291 }
2292
2293 			/* When a fatal error occurs in poison mode, a mode1
2294 			 * reset is used to recover the gpu.
2295 			 */
2296 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2297 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2298 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2299
2300 psp_fatal_error_recovery_quirk(&adev->psp);
2301 }
2302 }
2303
2304 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2305 }
2306 atomic_set(&ras->in_recovery, 0);
2307 if (hive) {
2308 atomic_set(&hive->ras_recovery, 0);
2309 amdgpu_put_xgmi_hive(hive);
2310 }
2311 }
2312
2313 /* alloc/realloc bps array */
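/* Growth happens in 512-entry granules: e.g. asking an empty table for
 * room for 256 new pages allocates ALIGN(256, 512) = 512 slots, leaving
 * space_left = 512.
 */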
2314 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2315 struct ras_err_handler_data *data, int pages)
2316 {
2317 unsigned int old_space = data->count + data->space_left;
2318 unsigned int new_space = old_space + pages;
2319 unsigned int align_space = ALIGN(new_space, 512);
2320 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2321
2322 if (!bps) {
2323 return -ENOMEM;
2324 }
2325
2326 if (data->bps) {
2327 memcpy(bps, data->bps,
2328 data->count * sizeof(*data->bps));
2329 kfree(data->bps);
2330 }
2331
2332 data->bps = bps;
2333 data->space_left += align_space - old_space;
2334 return 0;
2335 }
2336
2337 /* it deals with vram only. */
2338 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2339 struct eeprom_table_record *bps, int pages)
2340 {
2341 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2342 struct ras_err_handler_data *data;
2343 int ret = 0;
2344 uint32_t i;
2345
2346 if (!con || !con->eh_data || !bps || pages <= 0)
2347 return 0;
2348
2349 mutex_lock(&con->recovery_lock);
2350 data = con->eh_data;
2351 if (!data)
2352 goto out;
2353
2354 for (i = 0; i < pages; i++) {
2355 if (amdgpu_ras_check_bad_page_unlock(con,
2356 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2357 continue;
2358
2359 if (!data->space_left &&
2360 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2361 ret = -ENOMEM;
2362 goto out;
2363 }
2364
2365 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2366 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2367 AMDGPU_GPU_PAGE_SIZE);
2368
2369 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2370 data->count++;
2371 data->space_left--;
2372 }
2373 out:
2374 mutex_unlock(&con->recovery_lock);
2375
2376 return ret;
2377 }
2378
2379 /*
2380  * write the error record array to eeprom; the function should be
2381  * protected by recovery_lock
2382  * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2383 */
2384 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2385 unsigned long *new_cnt)
2386 {
2387 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2388 struct ras_err_handler_data *data;
2389 struct amdgpu_ras_eeprom_control *control;
2390 int save_count;
2391
2392 if (!con || !con->eh_data) {
2393 if (new_cnt)
2394 *new_cnt = 0;
2395
2396 return 0;
2397 }
2398
2399 mutex_lock(&con->recovery_lock);
2400 control = &con->eeprom_control;
2401 data = con->eh_data;
2402 save_count = data->count - control->ras_num_recs;
2403 mutex_unlock(&con->recovery_lock);
2404
2405 if (new_cnt)
2406 *new_cnt = save_count / adev->umc.retire_unit;
2407
2408 /* only new entries are saved */
2409 if (save_count > 0) {
2410 if (amdgpu_ras_eeprom_append(control,
2411 &data->bps[control->ras_num_recs],
2412 save_count)) {
2413 dev_err(adev->dev, "Failed to save EEPROM table data!");
2414 return -EIO;
2415 }
2416
2417 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2418 }
2419
2420 return 0;
2421 }
2422
2423 /*
2424  * read the error record array from eeprom and reserve enough space for
2425 * storing new bad pages
2426 */
2427 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2428 {
2429 struct amdgpu_ras_eeprom_control *control =
2430 &adev->psp.ras_context.ras->eeprom_control;
2431 struct eeprom_table_record *bps;
2432 int ret;
2433
2434 /* no bad page record, skip eeprom access */
2435 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2436 return 0;
2437
2438 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2439 if (!bps)
2440 return -ENOMEM;
2441
2442 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2443 if (ret)
2444 dev_err(adev->dev, "Failed to load EEPROM table records!");
2445 else
2446 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2447
2448 kfree(bps);
2449 return ret;
2450 }
2451
2452 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2453 uint64_t addr)
2454 {
2455 struct ras_err_handler_data *data = con->eh_data;
2456 int i;
2457
2458 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2459 for (i = 0; i < data->count; i++)
2460 if (addr == data->bps[i].retired_page)
2461 return true;
2462
2463 return false;
2464 }
2465
2466 /*
2467  * check if an address belongs to a bad page
2468  *
2469  * Note: this check is only for the umc block
2470 */
2471 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2472 uint64_t addr)
2473 {
2474 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2475 bool ret = false;
2476
2477 if (!con || !con->eh_data)
2478 return ret;
2479
2480 mutex_lock(&con->recovery_lock);
2481 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2482 mutex_unlock(&con->recovery_lock);
2483 return ret;
2484 }
2485
2486 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2487 uint32_t max_count)
2488 {
2489 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2490
2491 	/*
2492 	 * Justification of the value of bad_page_cnt_threshold in the ras
2493 	 * structure:
2494 	 *
2495 	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2496 	 * in eeprom, or amdgpu_bad_page_threshold == -2, which gives two
2497 	 * scenarios:
2498 	 *
2499 	 * Bad page retirement enabled:
2500 	 * - If amdgpu_bad_page_threshold = -2,
2501 	 *   bad_page_cnt_threshold = typical value by formula.
2502 	 *
2503 	 * - When the value from the user is 0 < amdgpu_bad_page_threshold <
2504 	 *   max record length in eeprom, use it directly.
2505 	 *
2506 	 * Bad page retirement disabled:
2507 	 * - If amdgpu_bad_page_threshold = 0, bad page retirement is
2508 	 *   disabled, and bad_page_cnt_threshold has no effect.
2509 	 */
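	/*
	 * Worked example for the -2 ("typical value") case: with 16 GiB of
	 * VRAM, 17179869184 / RAS_BAD_PAGE_COVER (100 MiB) = 163, so the
	 * default threshold is min(163, max_count).
	 */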
2510
2511 if (amdgpu_bad_page_threshold < 0) {
2512 u64 val = adev->gmc.mc_vram_size;
2513
2514 do_div(val, RAS_BAD_PAGE_COVER);
2515 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2516 max_count);
2517 } else {
2518 con->bad_page_cnt_threshold = min_t(int, max_count,
2519 amdgpu_bad_page_threshold);
2520 }
2521 }
2522
2523 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2524 {
2525 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2526 struct ras_err_handler_data **data;
2527 u32 max_eeprom_records_count = 0;
2528 bool exc_err_limit = false;
2529 int ret;
2530
2531 if (!con || amdgpu_sriov_vf(adev))
2532 return 0;
2533
2534 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2535 * supports RAS and debugfs is enabled, but when
2536 * adev->ras_enabled is unset, i.e. when "ras_enable"
2537 * module parameter is set to 0.
2538 */
2539 con->adev = adev;
2540
2541 if (!adev->ras_enabled)
2542 return 0;
2543
2544 data = &con->eh_data;
2545 *data = kzalloc(sizeof(**data), GFP_KERNEL);
2546 if (!*data) {
2547 ret = -ENOMEM;
2548 goto out;
2549 }
2550
2551 mutex_init(&con->recovery_lock);
2552 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2553 atomic_set(&con->in_recovery, 0);
2554 con->eeprom_control.bad_channel_bitmap = 0;
2555
2556 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2557 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2558
2559 	/* Todo: during testing the SMU might fail to read the eeprom
2560 	 * through I2C when the GPU is pending on an XGMI reset at probe
2561 	 * time (mostly after a second bus reset), so skip it for now
2562 	 */
2563 if (adev->gmc.xgmi.pending_reset)
2564 return 0;
2565 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2566 	/*
2567 	 * This call fails either when exc_err_limit is true or when
2568 	 * ret != 0.
2569 	 */
2570 if (exc_err_limit || ret)
2571 goto free;
2572
2573 if (con->eeprom_control.ras_num_recs) {
2574 ret = amdgpu_ras_load_bad_pages(adev);
2575 if (ret)
2576 goto free;
2577
2578 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2579
2580 if (con->update_channel_flag == true) {
2581 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2582 con->update_channel_flag = false;
2583 }
2584 }
2585
2586 #ifdef CONFIG_X86_MCE_AMD
2587 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2588 (adev->gmc.xgmi.connected_to_cpu))
2589 amdgpu_register_bad_pages_mca_notifier(adev);
2590 #endif
2591 return 0;
2592
2593 free:
2594 kfree((*data)->bps);
2595 kfree(*data);
2596 con->eh_data = NULL;
2597 out:
2598 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2599
2600 	/*
2601 	 * Except for the error-threshold-exceeded case, other failure
2602 	 * cases in this function do not fail amdgpu driver init.
2603 	 */
2604 if (!exc_err_limit)
2605 ret = 0;
2606 else
2607 ret = -EINVAL;
2608
2609 return ret;
2610 }
2611
2612 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2613 {
2614 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2615 struct ras_err_handler_data *data = con->eh_data;
2616
2617 	/* if recovery_init failed to init it, fini has nothing to do */
2618 if (!data)
2619 return 0;
2620
2621 cancel_work_sync(&con->recovery_work);
2622
2623 mutex_lock(&con->recovery_lock);
2624 con->eh_data = NULL;
2625 kfree(data->bps);
2626 kfree(data);
2627 mutex_unlock(&con->recovery_lock);
2628
2629 return 0;
2630 }
2631 /* recovery end */
2632
2633 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2634 {
2635 if (amdgpu_sriov_vf(adev)) {
2636 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2637 case IP_VERSION(13, 0, 2):
2638 case IP_VERSION(13, 0, 6):
2639 return true;
2640 default:
2641 return false;
2642 }
2643 }
2644
2645 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2646 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2647 case IP_VERSION(13, 0, 0):
2648 case IP_VERSION(13, 0, 6):
2649 case IP_VERSION(13, 0, 10):
2650 return true;
2651 default:
2652 return false;
2653 }
2654 }
2655
2656 return adev->asic_type == CHIP_VEGA10 ||
2657 adev->asic_type == CHIP_VEGA20 ||
2658 adev->asic_type == CHIP_ARCTURUS ||
2659 adev->asic_type == CHIP_ALDEBARAN ||
2660 adev->asic_type == CHIP_SIENNA_CICHLID;
2661 }
2662
2663 /*
2664  * this is a workaround for the vega20 workstation sku:
2665  * force enable gfx ras and ignore the vbios gfx ras flag,
2666  * because GC EDC cannot be written
2667  */
2668 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2669 {
2670 struct atom_context *ctx = adev->mode_info.atom_context;
2671
2672 if (!ctx)
2673 return;
2674
2675 if (strnstr(ctx->vbios_pn, "D16406",
2676 sizeof(ctx->vbios_pn)) ||
2677 strnstr(ctx->vbios_pn, "D36002",
2678 sizeof(ctx->vbios_pn)))
2679 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2680 }
2681
2682 /*
2683  * check the hardware's ras ability, which will be saved in hw_supported.
2684  * if the hardware does not support ras, we can skip some ras
2685  * initialization and forbid some ras operations from IPs.
2686  * if software itself, say a boot parameter, limits the ras ability, we
2687  * still need to allow IPs to do some limited operations, like disable.
2688  * In such a case, we have to initialize ras as normal, but check in
2689  * each function whether the operation is allowed or not.
2690  */
2691 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2692 {
2693 adev->ras_hw_enabled = adev->ras_enabled = 0;
2694
2695 if (!amdgpu_ras_asic_supported(adev))
2696 return;
2697
2698 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
2699 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2700 dev_info(adev->dev, "MEM ECC is active.\n");
2701 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2702 1 << AMDGPU_RAS_BLOCK__DF);
2703 } else {
2704 dev_info(adev->dev, "MEM ECC is not presented.\n");
2705 }
2706
2707 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2708 dev_info(adev->dev, "SRAM ECC is active.\n");
2709 if (!amdgpu_sriov_vf(adev))
2710 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2711 1 << AMDGPU_RAS_BLOCK__DF);
2712 else
2713 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2714 1 << AMDGPU_RAS_BLOCK__SDMA |
2715 1 << AMDGPU_RAS_BLOCK__GFX);
2716
2717 			/* VCN/JPEG RAS can be supported in both bare-metal and
2718 			 * SRIOV environments
2719 			 */
2720 if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
2721 IP_VERSION(2, 6, 0) ||
2722 amdgpu_ip_version(adev, VCN_HWIP, 0) ==
2723 IP_VERSION(4, 0, 0) ||
2724 amdgpu_ip_version(adev, VCN_HWIP, 0) ==
2725 IP_VERSION(4, 0, 3))
2726 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2727 1 << AMDGPU_RAS_BLOCK__JPEG);
2728 else
2729 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2730 1 << AMDGPU_RAS_BLOCK__JPEG);
2731
2732 			/*
2733 			 * XGMI RAS is not supported if the number of xgmi
2734 			 * physical nodes is zero
2735 			 */
2736 if (!adev->gmc.xgmi.num_physical_nodes)
2737 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2738 } else {
2739 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2740 }
2741 } else {
2742 		/* the driver only manages the RAS features of a few IP blocks
2743 		 * when the GPU is connected to the CPU through XGMI */
2744 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2745 1 << AMDGPU_RAS_BLOCK__SDMA |
2746 1 << AMDGPU_RAS_BLOCK__MMHUB);
2747 }
2748
2749 amdgpu_ras_get_quirks(adev);
2750
2751 /* hw_supported needs to be aligned with RAS block mask. */
2752 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2753
2754 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2755 adev->ras_hw_enabled & amdgpu_ras_mask;
2756 }
2757
2758 static void amdgpu_ras_counte_dw(struct work_struct *work)
2759 {
2760 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2761 ras_counte_delay_work.work);
2762 struct amdgpu_device *adev = con->adev;
2763 struct drm_device *dev = adev_to_drm(adev);
2764 unsigned long ce_count, ue_count;
2765 int res;
2766
2767 res = pm_runtime_get_sync(dev->dev);
2768 if (res < 0)
2769 goto Out;
2770
2771 /* Cache new values.
2772 */
2773 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2774 atomic_set(&con->ras_ce_count, ce_count);
2775 atomic_set(&con->ras_ue_count, ue_count);
2776 }
2777
2778 pm_runtime_mark_last_busy(dev->dev);
2779 Out:
2780 pm_runtime_put_autosuspend(dev->dev);
2781 }
2782
2783 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2784 {
2785 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2786 bool df_poison, umc_poison;
2787
2788 /* poison setting is useless on SRIOV guest */
2789 if (amdgpu_sriov_vf(adev) || !con)
2790 return;
2791
2792 /* Init poison supported flag, the default value is false */
2793 if (adev->gmc.xgmi.connected_to_cpu ||
2794 adev->gmc.is_app_apu) {
2795 /* enabled by default when GPU is connected to CPU */
2796 con->poison_supported = true;
2797 } else if (adev->df.funcs &&
2798 adev->df.funcs->query_ras_poison_mode &&
2799 adev->umc.ras &&
2800 adev->umc.ras->query_ras_poison_mode) {
2801 df_poison =
2802 adev->df.funcs->query_ras_poison_mode(adev);
2803 umc_poison =
2804 adev->umc.ras->query_ras_poison_mode(adev);
2805
2806 		/* Only when poison is set in both DF and UMC can we support it */
2807 if (df_poison && umc_poison)
2808 con->poison_supported = true;
2809 else if (df_poison != umc_poison)
2810 dev_warn(adev->dev,
2811 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2812 df_poison, umc_poison);
2813 }
2814 }
2815
2816 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
2817 {
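	/* The parentheses around the conditional matter: without them the
	 * '|' chain would bind to the ':' arm of the conditional, dropping
	 * the always-on error types whenever poison mode is supported.
	 */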
2818 	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
2819 	       AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
2820 	       AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
2821 	       AMDGPU_RAS_ERROR__PARITY;
2822 }
2823
2824 int amdgpu_ras_init(struct amdgpu_device *adev)
2825 {
2826 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2827 int r;
2828
2829 if (con)
2830 return 0;
2831
2832 con = kzalloc(sizeof(*con) +
2833 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2834 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2835 GFP_KERNEL);
2836 if (!con)
2837 return -ENOMEM;
2838
2839 con->adev = adev;
2840 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2841 atomic_set(&con->ras_ce_count, 0);
2842 atomic_set(&con->ras_ue_count, 0);
2843
2844 con->objs = (struct ras_manager *)(con + 1);
2845
2846 amdgpu_ras_set_context(adev, con);
2847
2848 amdgpu_ras_check_supported(adev);
2849
2850 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2851 		/* set the gfx block ras context feature for VEGA20 Gaming, to
2852 		 * send the ras disable cmd to the ras ta during ras late init.
2853 		 */
2854 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2855 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2856
2857 return 0;
2858 }
2859
2860 r = 0;
2861 goto release_con;
2862 }
2863
2864 con->update_channel_flag = false;
2865 con->features = 0;
2866 con->schema = 0;
2867 INIT_LIST_HEAD(&con->head);
2868 	/* Might need to get this flag from the vbios. */
2869 con->flags = RAS_DEFAULT_FLAGS;
2870
2871 	/* initialize the nbio ras function ahead of any other
2872 	 * ras functions, so the hardware fatal error interrupt
2873 	 * can be enabled as early as possible */
2874 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2875 case IP_VERSION(7, 4, 0):
2876 case IP_VERSION(7, 4, 1):
2877 case IP_VERSION(7, 4, 4):
2878 if (!adev->gmc.xgmi.connected_to_cpu)
2879 adev->nbio.ras = &nbio_v7_4_ras;
2880 break;
2881 case IP_VERSION(4, 3, 0):
2882 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2883 			/* unlike other generations of nbio ras,
2884 			 * nbio v4_3 only supports the fatal error interrupt
2885 			 * to inform software that DF is frozen due to a
2886 			 * system fatal error event. the driver should not
2887 			 * enable nbio ras in such a case. Instead,
2888 			 * check DF RAS */
2889 adev->nbio.ras = &nbio_v4_3_ras;
2890 break;
2891 case IP_VERSION(7, 9, 0):
2892 if (!adev->gmc.is_app_apu)
2893 adev->nbio.ras = &nbio_v7_9_ras;
2894 break;
2895 default:
2896 /* nbio ras is not available */
2897 break;
2898 }
2899
2900 	/* the nbio ras block needs to be enabled ahead of the other ras
2901 	 * blocks to handle fatal errors */
2902 r = amdgpu_nbio_ras_sw_init(adev);
2903 if (r)
2904 return r;
2905
2906 if (adev->nbio.ras &&
2907 adev->nbio.ras->init_ras_controller_interrupt) {
2908 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2909 if (r)
2910 goto release_con;
2911 }
2912
2913 if (adev->nbio.ras &&
2914 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2915 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2916 if (r)
2917 goto release_con;
2918 }
2919
2920 amdgpu_ras_query_poison_mode(adev);
2921
2922 	/* Pack the socket_id into ras feature mask bits [31:29] */
2923 if (adev->smuio.funcs &&
2924 adev->smuio.funcs->get_socket_id)
2925 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
2926
2927 /* Get RAS schema for particular SOC */
2928 con->schema = amdgpu_get_ras_schema(adev);
2929
2930 if (amdgpu_ras_fs_init(adev)) {
2931 r = -EINVAL;
2932 goto release_con;
2933 }
2934
2935 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2936 "hardware ability[%x] ras_mask[%x]\n",
2937 adev->ras_hw_enabled, adev->ras_enabled);
2938
2939 return 0;
2940 release_con:
2941 amdgpu_ras_set_context(adev, NULL);
2942 kfree(con);
2943
2944 return r;
2945 }
2946
2947 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2948 {
2949 if (adev->gmc.xgmi.connected_to_cpu ||
2950 adev->gmc.is_app_apu)
2951 return 1;
2952 return 0;
2953 }
2954
2955 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2956 struct ras_common_if *ras_block)
2957 {
2958 struct ras_query_if info = {
2959 .head = *ras_block,
2960 };
2961
2962 if (!amdgpu_persistent_edc_harvesting_supported(adev))
2963 return 0;
2964
2965 if (amdgpu_ras_query_error_status(adev, &info) != 0)
2966 DRM_WARN("RAS init harvest failure");
2967
2968 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2969 DRM_WARN("RAS init harvest reset failure");
2970
2971 return 0;
2972 }
2973
2974 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2975 {
2976 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2977
2978 if (!con)
2979 return false;
2980
2981 return con->poison_supported;
2982 }
2983
2984 /* helper function to handle common stuff in ip late init phase */
2985 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2986 struct ras_common_if *ras_block)
2987 {
2988 struct amdgpu_ras_block_object *ras_obj = NULL;
2989 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2990 struct ras_query_if *query_info;
2991 unsigned long ue_count, ce_count;
2992 int r;
2993
2994 /* disable RAS feature per IP block if it is not supported */
2995 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2996 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2997 return 0;
2998 }
2999
3000 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3001 if (r) {
3002 if (adev->in_suspend || amdgpu_in_reset(adev)) {
3003 			/* in the resume phase, if we fail to enable ras,
3004 			 * clean up all ras fs nodes and disable ras */
3005 goto cleanup;
3006 } else
3007 return r;
3008 }
3009
3010 	/* check for errors on warm reset for ASICs that support persistent EDC */
3011 amdgpu_persistent_edc_harvesting(adev, ras_block);
3012
3013 /* in resume phase, no need to create ras fs node */
3014 if (adev->in_suspend || amdgpu_in_reset(adev))
3015 return 0;
3016
3017 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3018 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3019 (ras_obj->hw_ops->query_poison_status ||
3020 ras_obj->hw_ops->handle_poison_consumption))) {
3021 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3022 if (r)
3023 goto cleanup;
3024 }
3025
3026 if (ras_obj->hw_ops &&
3027 (ras_obj->hw_ops->query_ras_error_count ||
3028 ras_obj->hw_ops->query_ras_error_status)) {
3029 r = amdgpu_ras_sysfs_create(adev, ras_block);
3030 if (r)
3031 goto interrupt;
3032
3033 /* Those are the cached values at init.
3034 */
3035 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3036 if (!query_info)
3037 return -ENOMEM;
3038 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3039
3040 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3041 atomic_set(&con->ras_ce_count, ce_count);
3042 atomic_set(&con->ras_ue_count, ue_count);
3043 }
3044
3045 kfree(query_info);
3046 }
3047
3048 return 0;
3049
3050 interrupt:
3051 if (ras_obj->ras_cb)
3052 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3053 cleanup:
3054 amdgpu_ras_feature_enable(adev, ras_block, 0);
3055 return r;
3056 }
3057
3058 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3059 struct ras_common_if *ras_block)
3060 {
3061 return amdgpu_ras_block_late_init(adev, ras_block);
3062 }
3063
3064 /* helper function to remove ras fs node and interrupt handler */
3065 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3066 struct ras_common_if *ras_block)
3067 {
3068 struct amdgpu_ras_block_object *ras_obj;
3069 if (!ras_block)
3070 return;
3071
3072 amdgpu_ras_sysfs_remove(adev, ras_block);
3073
3074 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3075 if (ras_obj->ras_cb)
3076 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3077 }
3078
3079 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3080 struct ras_common_if *ras_block)
3081 {
3082 return amdgpu_ras_block_late_fini(adev, ras_block);
3083 }
3084
3085 /* do some init work after IP late init as a dependency.
3086  * it runs in the resume/gpu reset/boot-up cases.
3087 */
3088 void amdgpu_ras_resume(struct amdgpu_device *adev)
3089 {
3090 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3091 struct ras_manager *obj, *tmp;
3092
3093 if (!adev->ras_enabled || !con) {
3094 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
3095 amdgpu_release_ras_context(adev);
3096
3097 return;
3098 }
3099
3100 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3101 		/* Set up all the other IPs which are not implemented. There is
3102 		 * one tricky point: the IP's actual ras error type should be
3103 		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
3104 		 * ERROR_NONE makes sense anyway.
3105 		 */
3106 amdgpu_ras_enable_all_features(adev, 1);
3107
3108 		/* We enable ras on all hw_supported blocks, but the boot
3109 		 * parameter might disable some of them, and one or more IPs
3110 		 * may not be implemented yet. So we disable them on their behalf.
3111 		 */
3112 list_for_each_entry_safe(obj, tmp, &con->head, node) {
3113 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3114 amdgpu_ras_feature_enable(adev, &obj->head, 0);
3115 				/* there should not be any reference. */
3116 WARN_ON(alive_obj(obj));
3117 }
3118 }
3119 }
3120 }
3121
3122 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3123 {
3124 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3125
3126 if (!adev->ras_enabled || !con)
3127 return;
3128
3129 amdgpu_ras_disable_all_features(adev, 0);
3130 /* Make sure all ras objects are disabled. */
3131 if (con->features)
3132 amdgpu_ras_disable_all_features(adev, 1);
3133 }
3134
3135 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3136 {
3137 struct amdgpu_ras_block_list *node, *tmp;
3138 struct amdgpu_ras_block_object *obj;
3139 int r;
3140
3141 	/* The guest side doesn't need to init the ras feature */
3142 if (amdgpu_sriov_vf(adev))
3143 return 0;
3144
3145 amdgpu_ras_set_mca_debug_mode(adev, false);
3146
3147 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3148 if (!node->ras_obj) {
3149 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3150 continue;
3151 }
3152
3153 obj = node->ras_obj;
3154 if (obj->ras_late_init) {
3155 r = obj->ras_late_init(adev, &obj->ras_comm);
3156 if (r) {
3157 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3158 obj->ras_comm.name, r);
3159 return r;
3160 }
3161 } else
3162 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3163 }
3164
3165 return 0;
3166 }
3167
3168 /* do some fini work before IP fini as dependence */
3169 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3170 {
3171 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3172
3173 if (!adev->ras_enabled || !con)
3174 return 0;
3175
3176
3177 	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
3178 if (con->features)
3179 amdgpu_ras_disable_all_features(adev, 0);
3180 amdgpu_ras_recovery_fini(adev);
3181 return 0;
3182 }
3183
3184 int amdgpu_ras_fini(struct amdgpu_device *adev)
3185 {
3186 struct amdgpu_ras_block_list *ras_node, *tmp;
3187 struct amdgpu_ras_block_object *obj = NULL;
3188 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3189
3190 if (!adev->ras_enabled || !con)
3191 return 0;
3192
3193 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3194 if (ras_node->ras_obj) {
3195 obj = ras_node->ras_obj;
3196 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3197 obj->ras_fini)
3198 obj->ras_fini(adev, &obj->ras_comm);
3199 else
3200 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3201 }
3202
3203 /* Clear ras blocks from ras_list and free ras block list node */
3204 list_del(&ras_node->node);
3205 kfree(ras_node);
3206 }
3207
3208 amdgpu_ras_fs_fini(adev);
3209 amdgpu_ras_interrupt_remove_all(adev);
3210
3211 WARN(con->features, "Feature mask is not cleared");
3212
3213 if (con->features)
3214 amdgpu_ras_disable_all_features(adev, 1);
3215
3216 cancel_delayed_work_sync(&con->ras_counte_delay_work);
3217
3218 amdgpu_ras_set_context(adev, NULL);
3219 kfree(con);
3220
3221 return 0;
3222 }
3223
3224 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
3225 {
3226 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
3227 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3228
3229 		dev_info(adev->dev, "uncorrectable hardware error "
3230 			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3231
3232 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3233 amdgpu_ras_reset_gpu(adev);
3234 }
3235 }
3236
3237 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
3238 {
3239 if (adev->asic_type == CHIP_VEGA20 &&
3240 adev->pm.fw_version <= 0x283400) {
3241 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3242 amdgpu_ras_intr_triggered();
3243 }
3244
3245 return false;
3246 }
3247
3248 void amdgpu_release_ras_context(struct amdgpu_device *adev)
3249 {
3250 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3251
3252 if (!con)
3253 return;
3254
3255 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3256 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3257 amdgpu_ras_set_context(adev, NULL);
3258 kfree(con);
3259 }
3260 }
3261
3262 #ifdef CONFIG_X86_MCE_AMD
3263 static struct amdgpu_device *find_adev(uint32_t node_id)
3264 {
3265 int i;
3266 struct amdgpu_device *adev = NULL;
3267
3268 for (i = 0; i < mce_adev_list.num_gpu; i++) {
3269 adev = mce_adev_list.devs[i];
3270
3271 if (adev && adev->gmc.xgmi.connected_to_cpu &&
3272 adev->gmc.xgmi.physical_node_id == node_id)
3273 break;
3274 adev = NULL;
3275 }
3276
3277 return adev;
3278 }
3279
3280 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
3281 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
3282 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3283 #define GPU_ID_OFFSET 8
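/* Field layout implied by the macros above: the GPU id lives in
 * MCA_IPID bits [47:44] (offset by GPU_ID_OFFSET), the UMC instance in
 * bits [23:21], and the channel index is {bit 20, bits [13:12]}.
 */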
3284
3285 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3286 unsigned long val, void *data)
3287 {
3288 struct mce *m = (struct mce *)data;
3289 struct amdgpu_device *adev = NULL;
3290 uint32_t gpu_id = 0;
3291 uint32_t umc_inst = 0, ch_inst = 0;
3292
3293 	/*
3294 	 * If the error was generated in UMC_V2, which belongs to the GPU
3295 	 * UMCs, and the error occurred in DramECC (extended error code = 0),
3296 	 * then only process the error; otherwise bail out.
3297 	 */
3298 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3299 (XEC(m->status, 0x3f) == 0x0)))
3300 return NOTIFY_DONE;
3301
3302 	/*
3303 	 * If it is a correctable error, return.
3304 	 */
3305 if (mce_is_correctable(m))
3306 return NOTIFY_OK;
3307
3308 /*
3309 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3310 */
3311 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3312
3313 adev = find_adev(gpu_id);
3314 if (!adev) {
3315 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3316 gpu_id);
3317 return NOTIFY_DONE;
3318 }
3319
3320 	/*
3321 	 * If it is an uncorrectable error, find out the UMC instance and
3322 	 * channel index.
3323 	 */
3324 umc_inst = GET_UMC_INST(m->ipid);
3325 ch_inst = GET_CHAN_INDEX(m->ipid);
3326
3327 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3328 umc_inst, ch_inst);
3329
3330 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3331 return NOTIFY_OK;
3332 else
3333 return NOTIFY_DONE;
3334 }
3335
3336 static struct notifier_block amdgpu_bad_page_nb = {
3337 .notifier_call = amdgpu_bad_page_notifier,
3338 .priority = MCE_PRIO_UC,
3339 };
3340
3341 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3342 {
3343 /*
3344 * Add the adev to the mce_adev_list.
3345 * During mode2 reset, amdgpu device is temporarily
3346 * removed from the mgpu_info list which can cause
3347 * page retirement to fail.
3348 * Use this list instead of mgpu_info to find the amdgpu
3349 * device on which the UMC error was reported.
3350 */
3351 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3352
3353 /*
3354 * Register the x86 notifier only once
3355 * with MCE subsystem.
3356 */
3357 if (notifier_registered == false) {
3358 mce_register_decode_chain(&amdgpu_bad_page_nb);
3359 notifier_registered = true;
3360 }
3361 }
3362 #endif
3363
3364 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3365 {
3366 if (!adev)
3367 return NULL;
3368
3369 return adev->psp.ras_context.ras;
3370 }
3371
3372 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3373 {
3374 if (!adev)
3375 return -EINVAL;
3376
3377 adev->psp.ras_context.ras = ras_con;
3378 return 0;
3379 }
3380
3381 /* check if ras is supported on block, say, sdma, gfx */
3382 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3383 unsigned int block)
3384 {
3385 int ret = 0;
3386 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3387
3388 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3389 return 0;
3390
3391 ret = ras && (adev->ras_enabled & (1 << block));
3392
3393 	/* For the special asics with mem ecc enabled but sram ecc not
3394 	 * enabled: even if the ras block is not supported per
3395 	 * .ras_enabled, if the asic supports poison mode and the ras
3396 	 * block has a ras configuration, the ras block can be considered
3397 	 * to support the ras function.
3398 	 */
3399 if (!ret &&
3400 (block == AMDGPU_RAS_BLOCK__GFX ||
3401 block == AMDGPU_RAS_BLOCK__SDMA ||
3402 block == AMDGPU_RAS_BLOCK__VCN ||
3403 block == AMDGPU_RAS_BLOCK__JPEG) &&
3404 amdgpu_ras_is_poison_mode_supported(adev) &&
3405 amdgpu_ras_get_ras_block(adev, block, 0))
3406 ret = 1;
3407
3408 return ret;
3409 }
3410
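/* Schedule the recovery worker at most once; in_recovery is cleared
 * again at the end of amdgpu_ras_do_recovery().
 */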
3411 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3412 {
3413 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3414
3415 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3416 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3417 return 0;
3418 }
3419
3420 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
3421 {
3422 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3423 int ret = 0;
3424
3425 if (con) {
3426 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3427 if (!ret)
3428 con->is_mca_debug_mode = enable;
3429 }
3430
3431 return ret;
3432 }
3433
3434 bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
3435 {
3436 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3437 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3438
3439 if (!con)
3440 return false;
3441
3442 if (mca_funcs && mca_funcs->mca_set_debug_mode)
3443 return con->is_mca_debug_mode;
3444 else
3445 return true;
3446 }
3447
3448 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
3449 unsigned int *error_query_mode)
3450 {
3451 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3452 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3453
3454 if (!con) {
3455 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
3456 return false;
3457 }
3458
3459 if (mca_funcs && mca_funcs->mca_set_debug_mode)
3460 *error_query_mode =
3461 (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
3462 else
3463 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
3464
3465 return true;
3466 }
3467
3468 /* Register each ip ras block into amdgpu ras */
3469 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3470 struct amdgpu_ras_block_object *ras_block_obj)
3471 {
3472 struct amdgpu_ras_block_list *ras_node;
3473 if (!adev || !ras_block_obj)
3474 return -EINVAL;
3475
3476 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3477 if (!ras_node)
3478 return -ENOMEM;
3479
3480 INIT_LIST_HEAD(&ras_node->node);
3481 ras_node->ras_obj = ras_block_obj;
3482 list_add_tail(&ras_node->node, &adev->ras_list);
3483
3484 return 0;
3485 }
3486
3487 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3488 {
3489 if (!err_type_name)
3490 return;
3491
3492 switch (err_type) {
3493 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3494 sprintf(err_type_name, "correctable");
3495 break;
3496 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3497 sprintf(err_type_name, "uncorrectable");
3498 break;
3499 default:
3500 sprintf(err_type_name, "unknown");
3501 break;
3502 }
3503 }
3504
3505 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3506 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3507 uint32_t instance,
3508 uint32_t *memory_id)
3509 {
3510 uint32_t err_status_lo_data, err_status_lo_offset;
3511
3512 if (!reg_entry)
3513 return false;
3514
3515 err_status_lo_offset =
3516 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3517 reg_entry->seg_lo, reg_entry->reg_lo);
3518 err_status_lo_data = RREG32(err_status_lo_offset);
3519
3520 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3521 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3522 return false;
3523
3524 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3525
3526 return true;
3527 }
3528
bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
				       uint32_t instance,
				       unsigned long *err_cnt)
{
	uint32_t err_status_hi_data, err_status_hi_offset;

	if (!reg_entry)
		return false;

	err_status_hi_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_hi, reg_entry->reg_hi);
	err_status_hi_data = RREG32(err_status_hi_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
		/* keep the check here in case we need to refer to the result later */
		dev_dbg(adev->dev, "Invalid err_info field\n");

	/* read err count */
	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

	return true;
}

void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   const struct amdgpu_ras_memory_id_entry *mem_list,
					   uint32_t mem_list_size,
					   uint32_t instance,
					   uint32_t err_type,
					   unsigned long *err_count)
{
	uint32_t memory_id;
	unsigned long err_cnt;
	char err_type_name[16];
	uint32_t i, j;

	for (i = 0; i < reg_list_size; i++) {
		/* query memory_id from err_status_lo */
		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
							 instance, &memory_id))
			continue;

		/* query err_cnt from err_status_hi */
		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
						       instance, &err_cnt) ||
		    !err_cnt)
			continue;

		*err_count += err_cnt;

		/* log the errors */
		amdgpu_ras_get_error_type_name(err_type, err_type_name);
		if (!mem_list) {
			/* memory_list is not supported */
			dev_info(adev->dev,
				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
				 err_cnt, err_type_name,
				 reg_list[i].block_name,
				 instance, memory_id);
		} else {
			for (j = 0; j < mem_list_size; j++) {
				if (memory_id == mem_list[j].memory_id) {
					dev_info(adev->dev,
						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
						 err_cnt, err_type_name,
						 reg_list[i].block_name,
						 instance, mem_list[j].name);
					break;
				}
			}
		}
	}
}

void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   uint32_t instance)
{
	uint32_t err_status_lo_offset, err_status_hi_offset;
	uint32_t i;

	for (i = 0; i < reg_list_size; i++) {
		err_status_lo_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_lo, reg_list[i].reg_lo);
		err_status_hi_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_hi, reg_list[i].reg_hi);
		WREG32(err_status_lo_offset, 0);
		WREG32(err_status_hi_offset, 0);
	}
}

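/*
 * Per-instance sketch (the reg/mem lists and instance count are
 * hypothetical): an IP block typically walks its instances, accumulates
 * counts with the query helper above, then clears the status registers it
 * just read.
 */
static void __maybe_unused example_query_and_reset(struct amdgpu_device *adev,
		const struct amdgpu_ras_err_status_reg_entry *reg_list,
		uint32_t reg_list_size,
		const struct amdgpu_ras_memory_id_entry *mem_list,
		uint32_t mem_list_size,
		uint32_t num_instances)
{
	unsigned long ce_count = 0;
	uint32_t i;

	for (i = 0; i < num_instances; i++) {
		amdgpu_ras_inst_query_ras_error_count(adev, reg_list, reg_list_size,
						      mem_list, mem_list_size, i,
						      AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
						      &ce_count);
		amdgpu_ras_inst_reset_ras_error_count(adev, reg_list, reg_list_size, i);
	}
}
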
int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
{
	memset(err_data, 0, sizeof(*err_data));

	INIT_LIST_HEAD(&err_data->err_node_list);

	return 0;
}

static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
{
	if (!err_node)
		return;

	list_del(&err_node->node);
	kvfree(err_node);
}

void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
	struct ras_err_node *err_node, *tmp;

	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
		amdgpu_ras_error_node_release(err_node);
}

static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
							     struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;
	struct amdgpu_smuio_mcm_config_info *ref_id;

	if (!err_data || !mcm_info)
		return NULL;

	for_each_ras_error(err_node, err_data) {
		ref_id = &err_node->err_info.mcm_info;

		if (mcm_info->socket_id == ref_id->socket_id &&
		    mcm_info->die_id == ref_id->die_id)
			return err_node;
	}

	return NULL;
}

static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
	struct ras_err_node *err_node;

	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return NULL;

	INIT_LIST_HEAD(&err_node->node);

	return err_node;
}

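/* sort key for the per-die error list: socket_id first, then die_id */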
static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;

	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;

	return infoa->die_id - infob->die_id;
}

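/*
 * Find-or-create: return the existing node for this socket/die pair, or
 * allocate a new one and keep the list sorted.
 */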
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
						      struct amdgpu_smuio_mcm_config_info *mcm_info,
						      struct ras_err_addr *err_addr)
{
	struct ras_err_node *err_node;

	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
	if (err_node)
		return &err_node->err_info;

	err_node = amdgpu_ras_error_node_new();
	if (!err_node)
		return NULL;

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

	if (err_addr)
		memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

	return &err_node->err_info;
}

int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					struct ras_err_addr *err_addr, u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
	if (!err_info)
		return -EINVAL;

	err_info->ue_count += count;
	err_data->ue_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					struct ras_err_addr *err_addr, u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
	if (!err_info)
		return -EINVAL;

	err_info->ce_count += count;
	err_data->ce_count += count;

	return 0;
}

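/*
 * Lifecycle sketch (socket/die ids and counts are made up): a query path
 * builds a ras_err_data on the stack, feeds per-die counts into it, then
 * releases the node list.
 */
static void __maybe_unused example_collect_counts(void)
{
	struct ras_err_data err_data;
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = 0,
		.die_id = 0,
	};

	if (amdgpu_ras_error_data_init(&err_data))
		return;

	/* one uncorrectable and two correctable errors on socket 0, die 0 */
	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, NULL, 1);
	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, NULL, 2);

	amdgpu_ras_error_data_fini(&err_data);
}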