/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif

static const char *RAS_FS_NAME = "ras";
const char *ras_error_string[] = {
	"multi_uncorrectable",

const char *ras_block_string[] = {

const char *ras_mca_block_string[] = {

struct amdgpu_ras_block_list {
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}
#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
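
/* The error-type values are used as single-bit masks here, so ffs() (the
 * 1-based index of the lowest set bit) maps a type mask straight to its
 * slot in ras_error_string[] above.
 */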
#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER	(100 * 1024 * 1024ULL)
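
/* For example, at that assumed rate a 16 GB board works out to roughly
 * 16384 MB / 100 MB ~= 164 expected bad pages over the board's lifetime.
 */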
#define MAX_UMC_POISON_POLLING_TIME_ASYNC  100  //ms

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);

#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
};
static struct mce_notifier_adev_list mce_adev_list;
#endif
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}
static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
		     "ue", info.ue_count,
		     "ce", info.ce_count);

	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}
static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.llseek = default_llseek
};
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}

	return -EINVAL;
}
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9];
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by the user */
	u32 instance_mask = 0;
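
	/* Accepted text commands, matching the sscanf patterns below:
	 *   disable <block>
	 *   enable  <block> <error>
	 *   inject  <block> <error> <sub-block> <address> <value> [<mask>]
	 *   retire_page <address>
	 * Addresses and values may be decimal or 0x-prefixed hex.
	 */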
	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}
static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison, where
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers; the leading 0x is optional.
 * The mask is the instance mask; it is optional, and the default value is 0x1.
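 *
 * For example, on a GPU with four GFX XCCs, an instance mask of 0x3 targets
 * instances 0 and 1 only; bits outside the valid instance range are dropped
 * by the driver before the injection is issued.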
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
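 *
 * In the first example above, a multi-uncorrectable error is injected into
 * the umc block at sub-block 0, address 0x0, value 0x0; the second injects
 * a correctable umc error with instance mask 0x3; the third disables RAS
 * on the umc block.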
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * Operations are only allowed on blocks which are supported.
 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 * to see which blocks support RAS on a particular asic.
 */
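
/*
 * A minimal user-space sketch of the binary path described above; it assumes
 * the struct ras_debug_if definition has been copied from amdgpu_ras.h into
 * the program, as the documentation suggests, and uses card 0 as an example:
 *
 *	int ras_disable_block(int fd, int block)
 *	{
 *		struct ras_debug_if data = { 0 };
 *
 *		data.op = 0;			// 0: disable RAS on the block
 *		data.head.block = block;	// e.g. AMDGPU_RAS_BLOCK__UMC
 *		// fd is an open handle to /sys/kernel/debug/dri/0/ras/ras_ctrl
 *		return write(fd, &data, sizeof(data)) == sizeof(data) ? 0 : -1;
 *	}
 */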
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
		const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		     adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is an offset instead of an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}
/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which have experienced ECC errors in VRAM. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
		const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM. */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};
/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * .. code-block:: bash
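 *
 *	ue: 0
 *	ce: 1
 *
 * The umc block additionally reports a deferred ("de") count, as the code
 * below shows.
 */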
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}
#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}
/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else {
		obj = &con->objs[head->block];
	}

	/* the obj already exists; do not create it again */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}
/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else {
			obj = &con->objs[head->block];
		}

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}
/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware supports ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we created the obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}
/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm to issue a ras TA cmd regardless of
			 * the current ras state.
			 * If the current state equals the target state, it will
			 * do nothing, but sometimes it requests the driver to
			 * reset and repost with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With the old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else {
		ret = amdgpu_ras_feature_enable(adev, head, enable);
	}

	return ret;
}
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp;
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}
static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp; the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp; the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}
static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      const char *blk_name,
					      bool is_ue,
					      bool is_de)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}
	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new deferred hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->de_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld deferred hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->de_count, blk_name);
			}
		} else {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new correctable hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->ce_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld correctable hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->ce_count, blk_name);
			}
		}
	}
}
static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld deferred hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.de_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld deferred hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.de_count,
				 blk_name);
		}
	}
}
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
	}
}
static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_common_if head;

	memset(&head, 0, sizeof(head));
	head.block = blk;

	return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			const struct aca_info *aca_info, void *data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	amdgpu_aca_remove_handle(&obj->aca_handle);

	return 0;
}
static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum aca_error_type type, struct ras_err_data *err_data)
{
	struct ras_manager *obj;

	obj = get_ras_manager(adev, blk);
	if (!obj)
		return -EINVAL;

	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
				  struct aca_handle *handle, char *buf, void *data)
{
	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;
	int ret;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		if (amdgpu_aca_is_enabled(adev)) {
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
		}
	}

	return 0;
}
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
		return -EINVAL;

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_aca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	     hive_ras_recovery) &&
	    ((smu_funcs && smu_funcs->set_debug_mode) ||
	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}
int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
								  info->instance_mask);
	} else {
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}
/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	/* do nothing if query_info is not specified */
	if (!query_info)
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear,
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}
/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
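 *
 * A minimal call sketch (the caller shown is hypothetical), counting across
 * all RAS-capable IP blocks by passing a NULL @query_info:
 *
 *	unsigned long ce = 0, ue = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *		dev_info(adev->dev, "ce %lu ue %lu\n", ce, ue);
 */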
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}
/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved due to some reasons.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 */
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
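	/* start/end convert the byte window (ppos, count) into a range of
	 * whole fixed-size text lines; partial lines at either edge of the
	 * window are skipped rather than emitted.
	 */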
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}
static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);

	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);

	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}
static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &con->badpages_attr.attr,
					     RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				    &obj->sysfs_attr.attr,
				    RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &obj->sysfs_attr.attr,
					     RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}
static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to the known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides the user one
	 * direct way to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated; in that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * The user can set this to avoid cleaning up the hardware's error
	 * count registers of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);

	return dir;
}
static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
	       head->debugfs_name,
	       sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in the resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
				get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}

	if (amdgpu_aca_is_enabled(adev))
		amdgpu_aca_smu_debugfs_init(adev, dir);
	else
		amdgpu_mca_smu_debugfs_init(adev, dir);
}
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
		amdgpu_ras_sysfs_version_show, NULL);
static DEVICE_ATTR(schema, 0444,
		amdgpu_ras_sysfs_schema_show, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	group.attrs = attrs;

	/* add features entry */
	con->features_attr = dev_attr_features;
	sysfs_attr_init(attrs[0]);

	/* add version entry */
	con->version_attr = dev_attr_version;
	sysfs_attr_init(attrs[1]);

	/* add schema entry */
	con->schema_attr = dev_attr_schema;
	sysfs_attr_init(attrs[2]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);

	return 0;
}
/* For the hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on the host side */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}
static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need a poison
	 * consumption handler
	 */
	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not a poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
				 block_obj->ras_comm.name);

			return;
		}
	}

	amdgpu_umc_poison_handler(adev, obj->head.block, false);

	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
			 block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	} else {
		amdgpu_gfx_poison_consumption_handler(adev, entry);
	}
}
static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		 "Poison is created\n");
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data;
	int ret;

	if (!data->cb)
		return;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return;

	/* Let IP handle its data; maybe we need to get the output
	 * from the callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave it to the IP to do that recovery; here we
	 * just dispatch the event.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count error numbers
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
		obj->err_data.de_count += err_data.de_count;
	}

	amdgpu_ras_error_data_fini(&err_data);
}
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
		       data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
			      data->rptr) % data->ring_size;

		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
		}
	}
}
static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
	       data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
		      data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}
int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;
	struct amdgpu_ras_block_object *ras_obj;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, head);
		if (!obj)
			return -EINVAL;
	} else {
		get_obj(obj);
	}

	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = ras_obj->ras_cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	return 0;
}
static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
	}

	return 0;
}
/* traverse all IPs except NBIO to query error counters */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF IP has a different isr for the ras controller
		 * interrupt; the specific ras counter query will be done in
		 * that isr. So skip such blocks from the common sync flood
		 * interrupt isr calling.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		/*
		 * This is a workaround for aldebaran: skip sending the msg
		 * to smu to get the ecc_info table, because smu currently
		 * fails to handle it. Remove this once smu handles the
		 * ecc_info table correctly.
		 */
		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
		     IP_VERSION(13, 0, 2)))
			continue;

		amdgpu_ras_query_error_status(adev, &info);

		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 2) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 4) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(13, 0, 0)) {
			if (amdgpu_ras_reset_error_status(adev, info.head.block))
				dev_warn(adev->dev, "Failed to reset error counter and error status");
		}
	}
}
2292 /* Parse RdRspStatus and WrRspStatus */
2293 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2294 struct ras_query_if *info)
2296 struct amdgpu_ras_block_object *block_obj;
/*
* Only two blocks need to query the read/write RspStatus
* at the current state
*/
2301 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2302 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2305 block_obj = amdgpu_ras_get_ras_block(adev,
2307 info->head.sub_block_index);
2309 if (!block_obj || !block_obj->hw_ops) {
dev_dbg_once(adev->dev, "%s doesn't configure the RAS function\n",
2311 get_ras_block_str(&info->head));
2315 if (block_obj->hw_ops->query_ras_error_status)
2316 block_obj->hw_ops->query_ras_error_status(adev);
2320 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2322 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2323 struct ras_manager *obj;
2325 if (!adev->ras_enabled || !con)
2328 list_for_each_entry(obj, &con->head, node) {
2329 struct ras_query_if info = {
2333 amdgpu_ras_error_status_query(adev, &info);
2337 /* recovery begin */
/* Return 0 on success.
* The caller must free bps.
2342 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2343 struct ras_badpage **bps, unsigned int *count)
2345 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2346 struct ras_err_handler_data *data;
2348 int ret = 0, status;
2350 if (!con || !con->eh_data || !bps || !count)
2353 mutex_lock(&con->recovery_lock);
2354 data = con->eh_data;
2355 if (!data || data->count == 0) {
2361 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2367 for (; i < data->count; i++) {
2368 (*bps)[i] = (struct ras_badpage){
2369 .bp = data->bps[i].retired_page,
2370 .size = AMDGPU_GPU_PAGE_SIZE,
2371 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2373 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2374 data->bps[i].retired_page);
2375 if (status == -EBUSY)
2376 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2377 else if (status == -ENOENT)
2378 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2381 *count = data->count;
2383 mutex_unlock(&con->recovery_lock);
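/*
 * Recovery work: harvest error counters from every device in the XGMI
 * hive (or just this device when there is no hive), then schedule a
 * GPU reset whose type (mode1/mode2, full reset or not) is derived
 * from ras->gpu_reset_flags and poison support.
 */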
2387 static void amdgpu_ras_do_recovery(struct work_struct *work)
2389 struct amdgpu_ras *ras =
2390 container_of(work, struct amdgpu_ras, recovery_work);
2391 struct amdgpu_device *remote_adev = NULL;
2392 struct amdgpu_device *adev = ras->adev;
2393 struct list_head device_list, *device_list_handle = NULL;
2394 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2397 atomic_set(&hive->ras_recovery, 1);
2398 if (!ras->disable_ras_err_cnt_harvest) {
2400 /* Build list of devices to query RAS related errors */
2401 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2402 device_list_handle = &hive->device_list;
2404 INIT_LIST_HEAD(&device_list);
2405 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2406 device_list_handle = &device_list;
2409 list_for_each_entry(remote_adev,
2410 device_list_handle, gmc.xgmi.head) {
2411 amdgpu_ras_query_err_status(remote_adev);
2412 amdgpu_ras_log_on_err_counter(remote_adev);
2417 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2418 struct amdgpu_reset_context reset_context;
2419 memset(&reset_context, 0, sizeof(reset_context));
2421 reset_context.method = AMD_RESET_METHOD_NONE;
2422 reset_context.reset_req_dev = adev;
2424 /* Perform full reset in fatal error mode */
2425 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2426 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2428 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2430 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2431 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2432 reset_context.method = AMD_RESET_METHOD_MODE2;
/* A fatal error occurred in poison mode; use mode1 reset to recover. */
2438 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2439 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2440 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2442 /* For any RAS error that needs a full reset to
2443 * recover, set the fatal error status
2446 list_for_each_entry(remote_adev,
2449 amdgpu_ras_set_fed(remote_adev,
2452 amdgpu_ras_set_fed(adev, true);
2454 psp_fatal_error_recovery_quirk(&adev->psp);
2458 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2460 atomic_set(&ras->in_recovery, 0);
2462 atomic_set(&hive->ras_recovery, 0);
2463 amdgpu_put_xgmi_hive(hive);
2467 /* alloc/realloc bps array */
2468 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2469 struct ras_err_handler_data *data, int pages)
2471 unsigned int old_space = data->count + data->space_left;
2472 unsigned int new_space = old_space + pages;
2473 unsigned int align_space = ALIGN(new_space, 512);
2474 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2481 memcpy(bps, data->bps,
2482 data->count * sizeof(*data->bps));
2487 data->space_left += align_space - old_space;
/* It deals with VRAM only. */
2492 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2493 struct eeprom_table_record *bps, int pages)
2495 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2496 struct ras_err_handler_data *data;
2500 if (!con || !con->eh_data || !bps || pages <= 0)
2503 mutex_lock(&con->recovery_lock);
2504 data = con->eh_data;
2508 for (i = 0; i < pages; i++) {
2509 if (amdgpu_ras_check_bad_page_unlock(con,
2510 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2513 if (!data->space_left &&
2514 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2519 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2520 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2521 AMDGPU_GPU_PAGE_SIZE);
2523 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2528 mutex_unlock(&con->recovery_lock);
* Write the error record array to EEPROM; access to the table is
* serialized by recovery_lock.
* new_cnt: count of newly added UEs, excluding reserved bad pages; may be NULL
2538 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2539 unsigned long *new_cnt)
2541 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2542 struct ras_err_handler_data *data;
2543 struct amdgpu_ras_eeprom_control *control;
2546 if (!con || !con->eh_data) {
2553 mutex_lock(&con->recovery_lock);
2554 control = &con->eeprom_control;
2555 data = con->eh_data;
2556 save_count = data->count - control->ras_num_recs;
2557 mutex_unlock(&con->recovery_lock);
2560 *new_cnt = save_count / adev->umc.retire_unit;
2562 /* only new entries are saved */
2563 if (save_count > 0) {
2564 if (amdgpu_ras_eeprom_append(control,
2565 &data->bps[control->ras_num_recs],
2567 dev_err(adev->dev, "Failed to save EEPROM table data!");
2571 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
* Read the error record array from EEPROM and reserve enough space
* to store newly found bad pages
2581 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2583 struct amdgpu_ras_eeprom_control *control =
2584 &adev->psp.ras_context.ras->eeprom_control;
2585 struct eeprom_table_record *bps;
2588 /* no bad page record, skip eeprom access */
2589 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2592 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2596 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2598 dev_err(adev->dev, "Failed to load EEPROM table records!");
2600 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2606 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2609 struct ras_err_handler_data *data = con->eh_data;
2612 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2613 for (i = 0; i < data->count; i++)
2614 if (addr == data->bps[i].retired_page)
* Check whether an address belongs to a bad page.
*
* Note: this check is only for the UMC block.
2625 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2628 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2631 if (!con || !con->eh_data)
2634 mutex_lock(&con->recovery_lock);
2635 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2636 mutex_unlock(&con->recovery_lock);
2640 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2643 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
/*
* Justification of the bad_page_cnt_threshold value in the ras structure.
*
* Generally either 0 <= amdgpu_bad_page_threshold <= max record length
* in EEPROM, or amdgpu_bad_page_threshold == -2, which introduces two
* scenarios:
*
* Bad page retirement enabled:
*  - If amdgpu_bad_page_threshold = -2, bad_page_cnt_threshold is the
*    typical value computed by the formula below.
*
*  - If the user value satisfies 0 < amdgpu_bad_page_threshold <
*    max record length in EEPROM, use it directly.
*
* Bad page retirement disabled:
*  - If amdgpu_bad_page_threshold = 0, bad page retirement is
*    disabled and bad_page_cnt_threshold has no effect.
*/
2665 if (amdgpu_bad_page_threshold < 0) {
2666 u64 val = adev->gmc.mc_vram_size;
2668 do_div(val, RAS_BAD_PAGE_COVER);
2669 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2672 con->bad_page_cnt_threshold = min_t(int, max_count,
2673 amdgpu_bad_page_threshold);
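/*
 * Kernel thread that waits on page_retirement_wq and, for each queued
 * request, polls the UMC block for bad pages to retire, bounded by
 * MAX_UMC_POISON_POLLING_TIME_ASYNC.
 */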
2677 static int amdgpu_ras_page_retirement_thread(void *param)
2679 struct amdgpu_device *adev = (struct amdgpu_device *)param;
2680 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2682 while (!kthread_should_stop()) {
2684 wait_event_interruptible(con->page_retirement_wq,
2685 kthread_should_stop() ||
2686 atomic_read(&con->page_retirement_req_cnt));
2688 if (kthread_should_stop())
2691 dev_info(adev->dev, "Start processing page retirement. request:%d\n",
2692 atomic_read(&con->page_retirement_req_cnt));
2694 atomic_dec(&con->page_retirement_req_cnt);
2696 amdgpu_umc_bad_page_polling_timeout(adev,
2697 false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
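/*
 * Set up everything bad-page recovery depends on: the eh_data
 * bookkeeping, the recovery work item, the bad-page threshold,
 * previously retired pages loaded from EEPROM, and the page-retirement
 * kthread. On CONFIG_X86_MCE_AMD systems with an Aldebaran connected
 * to the CPU, an MCE notifier is registered as well.
 */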
2703 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2705 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2706 struct ras_err_handler_data **data;
2707 u32 max_eeprom_records_count = 0;
2708 bool exc_err_limit = false;
2711 if (!con || amdgpu_sriov_vf(adev))
2714 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2715 * supports RAS and debugfs is enabled, but when
2716 * adev->ras_enabled is unset, i.e. when "ras_enable"
2717 * module parameter is set to 0.
2721 if (!adev->ras_enabled)
2724 data = &con->eh_data;
2725 *data = kzalloc(sizeof(**data), GFP_KERNEL);
2731 mutex_init(&con->recovery_lock);
2732 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2733 atomic_set(&con->in_recovery, 0);
2734 con->eeprom_control.bad_channel_bitmap = 0;
2736 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2737 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
/* Todo: during testing, the SMU might fail to read the EEPROM
* through I2C when the GPU is pending an XGMI reset at probe time
* (mostly after the second bus reset); skip it for now.
*/
2743 if (adev->gmc.xgmi.pending_reset)
2745 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
/* This call fails when exc_err_limit is true or ret != 0 */
2750 if (exc_err_limit || ret)
2753 if (con->eeprom_control.ras_num_recs) {
2754 ret = amdgpu_ras_load_bad_pages(adev);
2758 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
if (con->update_channel_flag) {
2761 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2762 con->update_channel_flag = false;
2766 mutex_init(&con->page_retirement_lock);
2767 init_waitqueue_head(&con->page_retirement_wq);
2768 atomic_set(&con->page_retirement_req_cnt, 0);
2769 con->page_retirement_thread =
2770 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
2771 if (IS_ERR(con->page_retirement_thread)) {
2772 con->page_retirement_thread = NULL;
dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
2776 #ifdef CONFIG_X86_MCE_AMD
2777 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2778 (adev->gmc.xgmi.connected_to_cpu))
2779 amdgpu_register_bad_pages_mca_notifier(adev);
2784 kfree((*data)->bps);
2786 con->eh_data = NULL;
2788 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2791 * Except error threshold exceeding case, other failure cases in this
2792 * function would not fail amdgpu driver init.
2802 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2804 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2805 struct ras_err_handler_data *data = con->eh_data;
/* if recovery_init failed to initialize it, fini has nothing to do */
2811 if (con->page_retirement_thread)
2812 kthread_stop(con->page_retirement_thread);
2814 atomic_set(&con->page_retirement_req_cnt, 0);
2816 cancel_work_sync(&con->recovery_work);
2818 mutex_lock(&con->recovery_lock);
2819 con->eh_data = NULL;
2822 mutex_unlock(&con->recovery_lock);
2828 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2830 if (amdgpu_sriov_vf(adev)) {
2831 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2832 case IP_VERSION(13, 0, 2):
2833 case IP_VERSION(13, 0, 6):
2840 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2841 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2842 case IP_VERSION(13, 0, 0):
2843 case IP_VERSION(13, 0, 6):
2844 case IP_VERSION(13, 0, 10):
2851 return adev->asic_type == CHIP_VEGA10 ||
2852 adev->asic_type == CHIP_VEGA20 ||
2853 adev->asic_type == CHIP_ARCTURUS ||
2854 adev->asic_type == CHIP_ALDEBARAN ||
2855 adev->asic_type == CHIP_SIENNA_CICHLID;
* This is a workaround for the Vega20 workstation SKU: force-enable
* GFX RAS and ignore the vbios GFX RAS flag, because GC EDC cannot
* be written.
2863 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2865 struct atom_context *ctx = adev->mode_info.atom_context;
2870 if (strnstr(ctx->vbios_pn, "D16406",
2871 sizeof(ctx->vbios_pn)) ||
2872 strnstr(ctx->vbios_pn, "D36002",
2873 sizeof(ctx->vbios_pn)))
2874 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
/* Query RAS capability via the atomfirmware interface */
static void amdgpu_ras_query_ras_capability_from_vbios(struct amdgpu_device *adev)
2881 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2882 dev_info(adev->dev, "MEM ECC is active.\n");
2883 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2884 1 << AMDGPU_RAS_BLOCK__DF);
dev_info(adev->dev, "MEM ECC is not present.\n");
2890 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2891 dev_info(adev->dev, "SRAM ECC is active.\n");
2892 if (!amdgpu_sriov_vf(adev))
2893 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2894 1 << AMDGPU_RAS_BLOCK__DF);
2896 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2897 1 << AMDGPU_RAS_BLOCK__SDMA |
2898 1 << AMDGPU_RAS_BLOCK__GFX);
* VCN/JPEG RAS can be supported in both bare-metal and
* SRIOV environments
2904 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
2905 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
2906 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
2907 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2908 1 << AMDGPU_RAS_BLOCK__JPEG);
2910 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2911 1 << AMDGPU_RAS_BLOCK__JPEG);
* XGMI RAS is not supported if the number of XGMI physical
* nodes is zero
2917 if (!adev->gmc.xgmi.num_physical_nodes)
2918 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
dev_info(adev->dev, "SRAM ECC is not present.\n");
2924 /* Query poison mode from umc/df IP callbacks */
2925 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2927 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2928 bool df_poison, umc_poison;
2930 /* poison setting is useless on SRIOV guest */
2931 if (amdgpu_sriov_vf(adev) || !con)
2934 /* Init poison supported flag, the default value is false */
2935 if (adev->gmc.xgmi.connected_to_cpu ||
2936 adev->gmc.is_app_apu) {
2937 /* enabled by default when GPU is connected to CPU */
2938 con->poison_supported = true;
2939 } else if (adev->df.funcs &&
adev->df.funcs->query_ras_poison_mode &&
adev->umc.ras && adev->umc.ras->query_ras_poison_mode) {
df_poison = adev->df.funcs->query_ras_poison_mode(adev);
umc_poison = adev->umc.ras->query_ras_poison_mode(adev);
/* Only when poison is set in both DF and UMC can we support it */
2949 if (df_poison && umc_poison)
2950 con->poison_supported = true;
2951 else if (df_poison != umc_poison)
2953 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2954 df_poison, umc_poison);
* Check the hardware's RAS ability, which is saved in hw_supported.
* If the hardware does not support RAS, we can skip some RAS
* initialization and forbid RAS operations from IPs.
* If software itself (say, a boot parameter) limits the RAS ability,
* we still need to allow IPs to do some limited operations, like
* disable. In that case we have to initialize RAS as normal, but
* check in each function whether the operation is allowed.
2967 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2969 adev->ras_hw_enabled = adev->ras_enabled = 0;
2971 if (!amdgpu_ras_asic_supported(adev))
2974 /* query ras capability from psp */
2975 if (amdgpu_psp_get_ras_capability(&adev->psp))
2976 goto init_ras_enabled_flag;
/* query ras capability from vbios */
2979 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
amdgpu_ras_query_ras_capability_from_vbios(adev);
/* the driver only manages a few IP blocks' RAS features
* when the GPU is connected to the CPU through XGMI */
2984 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2985 1 << AMDGPU_RAS_BLOCK__SDMA |
2986 1 << AMDGPU_RAS_BLOCK__MMHUB);
2989 /* apply asic specific settings (vega20 only for now) */
2990 amdgpu_ras_get_quirks(adev);
2992 /* query poison mode from umc/df ip callback */
2993 amdgpu_ras_query_poison_mode(adev);
2995 init_ras_enabled_flag:
2996 /* hw_supported needs to be aligned with RAS block mask. */
2997 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2999 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3000 adev->ras_hw_enabled & amdgpu_ras_mask;
3002 /* aca is disabled by default */
3003 adev->aca.is_enabled = false;
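/*
 * Delayed work that refreshes the cached CE/UE counts exposed through
 * sysfs; it holds a runtime PM reference while querying the hardware.
 */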
3006 static void amdgpu_ras_counte_dw(struct work_struct *work)
3008 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3009 ras_counte_delay_work.work);
3010 struct amdgpu_device *adev = con->adev;
3011 struct drm_device *dev = adev_to_drm(adev);
3012 unsigned long ce_count, ue_count;
3015 res = pm_runtime_get_sync(dev->dev);
/* Cache new values. */
3021 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3022 atomic_set(&con->ras_ce_count, ce_count);
3023 atomic_set(&con->ras_ue_count, ue_count);
3026 pm_runtime_mark_last_busy(dev->dev);
3028 pm_runtime_put_autosuspend(dev->dev);
3031 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
/* the ternary must be parenthesized, otherwise ?: binds last and the
* base schema bits would only apply to the poison-unsupported branch */
return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
AMDGPU_RAS_ERROR__PARITY;
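/*
 * Early RAS init: allocate the RAS context together with one
 * ras_manager per block (including MCA blocks), determine
 * hardware/software RAS support, wire up the NBIO RAS interrupts ahead
 * of other blocks so fatal errors can be caught early, and create the
 * RAS fs nodes.
 */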
3039 int amdgpu_ras_init(struct amdgpu_device *adev)
3041 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3047 con = kzalloc(sizeof(*con) +
3048 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3049 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3055 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3056 atomic_set(&con->ras_ce_count, 0);
3057 atomic_set(&con->ras_ue_count, 0);
3059 con->objs = (struct ras_manager *)(con + 1);
3061 amdgpu_ras_set_context(adev, con);
3063 amdgpu_ras_check_supported(adev);
3065 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
/* set the GFX block RAS context feature for VEGA20 Gaming;
* send a RAS disable cmd to the RAS TA during RAS late init.
3069 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3070 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3079 con->update_channel_flag = false;
3082 INIT_LIST_HEAD(&con->head);
/* We might need to get this flag from vbios. */
3084 con->flags = RAS_DEFAULT_FLAGS;
3086 /* initialize nbio ras function ahead of any other
3087 * ras functions so hardware fatal error interrupt
3088 * can be enabled as early as possible */
3089 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3090 case IP_VERSION(7, 4, 0):
3091 case IP_VERSION(7, 4, 1):
3092 case IP_VERSION(7, 4, 4):
3093 if (!adev->gmc.xgmi.connected_to_cpu)
3094 adev->nbio.ras = &nbio_v7_4_ras;
3096 case IP_VERSION(4, 3, 0):
3097 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
/* Unlike other generations of NBIO RAS, NBIO v4_3 only
* supports the fatal error interrupt, which informs
* software that the DF is frozen due to a system fatal
* error event. The driver should not enable NBIO RAS in
* such a case. Instead, check DF RAS.
*/
3104 adev->nbio.ras = &nbio_v4_3_ras;
3106 case IP_VERSION(7, 9, 0):
3107 if (!adev->gmc.is_app_apu)
3108 adev->nbio.ras = &nbio_v7_9_ras;
3111 /* nbio ras is not available */
3115 /* nbio ras block needs to be enabled ahead of other ras blocks
3116 * to handle fatal error */
3117 r = amdgpu_nbio_ras_sw_init(adev);
3121 if (adev->nbio.ras &&
3122 adev->nbio.ras->init_ras_controller_interrupt) {
3123 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3128 if (adev->nbio.ras &&
3129 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3130 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
/* Pack socket_id into ras feature mask bits [31:29] */
3136 if (adev->smuio.funcs &&
3137 adev->smuio.funcs->get_socket_id)
3138 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3139 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3141 /* Get RAS schema for particular SOC */
3142 con->schema = amdgpu_get_ras_schema(adev);
3144 if (amdgpu_ras_fs_init(adev)) {
3149 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3150 "hardware ability[%x] ras_mask[%x]\n",
3151 adev->ras_hw_enabled, adev->ras_enabled);
3155 amdgpu_ras_set_context(adev, NULL);
3161 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3163 if (adev->gmc.xgmi.connected_to_cpu ||
3164 adev->gmc.is_app_apu)
3169 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3170 struct ras_common_if *ras_block)
3172 struct ras_query_if info = {
3176 if (!amdgpu_persistent_edc_harvesting_supported(adev))
3179 if (amdgpu_ras_query_error_status(adev, &info) != 0)
3180 DRM_WARN("RAS init harvest failure");
3182 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3183 DRM_WARN("RAS init harvest reset failure");
3188 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3190 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3195 return con->poison_supported;
3198 /* helper function to handle common stuff in ip late init phase */
3199 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3200 struct ras_common_if *ras_block)
3202 struct amdgpu_ras_block_object *ras_obj = NULL;
3203 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3204 struct ras_query_if *query_info;
3205 unsigned long ue_count, ce_count;
3208 /* disable RAS feature per IP block if it is not supported */
3209 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3210 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3214 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3216 if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in the resume phase, if we fail to enable ras,
* clean up all ras fs nodes and disable ras */
/* check for errors on ASICs that support persistent EDC across a warm reset */
3225 amdgpu_persistent_edc_harvesting(adev, ras_block);
3227 /* in resume phase, no need to create ras fs node */
3228 if (adev->in_suspend || amdgpu_in_reset(adev))
3231 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3232 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3233 (ras_obj->hw_ops->query_poison_status ||
3234 ras_obj->hw_ops->handle_poison_consumption))) {
3235 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3240 if (ras_obj->hw_ops &&
3241 (ras_obj->hw_ops->query_ras_error_count ||
3242 ras_obj->hw_ops->query_ras_error_status)) {
3243 r = amdgpu_ras_sysfs_create(adev, ras_block);
/* Those are the cached values at init. */
3249 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3252 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3254 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3255 atomic_set(&con->ras_ce_count, ce_count);
3256 atomic_set(&con->ras_ue_count, ue_count);
3265 if (ras_obj->ras_cb)
3266 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3268 amdgpu_ras_feature_enable(adev, ras_block, 0);
3272 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3273 struct ras_common_if *ras_block)
3275 return amdgpu_ras_block_late_init(adev, ras_block);
3278 /* helper function to remove ras fs node and interrupt handler */
3279 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3280 struct ras_common_if *ras_block)
3282 struct amdgpu_ras_block_object *ras_obj;
3286 amdgpu_ras_sysfs_remove(adev, ras_block);
3288 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3289 if (ras_obj->ras_cb)
3290 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3293 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3294 struct ras_common_if *ras_block)
3296 return amdgpu_ras_block_late_fini(adev, ras_block);
/* Do some init work after IP late init, as a dependency.
* It runs in the resume / GPU reset / boot-up cases.
3302 void amdgpu_ras_resume(struct amdgpu_device *adev)
3304 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3305 struct ras_manager *obj, *tmp;
3307 if (!adev->ras_enabled || !con) {
/* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
3309 amdgpu_release_ras_context(adev);
3314 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
/* Set up all other IPs which are not implemented. There is a
* tricky point: an IP's actual RAS error type should be
* MULTI_UNCORRECTABLE, but since the driver does not handle it,
* ERROR_NONE makes sense anyway.
3320 amdgpu_ras_enable_all_features(adev, 1);
/* We enable ras on all hw_supported blocks, but the boot
* parameter might disable some of them, and one or more IPs
* may not be implemented yet. So we disable those on their behalf.
3326 list_for_each_entry_safe(obj, tmp, &con->head, node) {
3327 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3328 amdgpu_ras_feature_enable(adev, &obj->head, 0);
/* there should not be any reference. */
3330 WARN_ON(alive_obj(obj));
3336 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3338 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3340 if (!adev->ras_enabled || !con)
3343 amdgpu_ras_disable_all_features(adev, 0);
3344 /* Make sure all ras objects are disabled. */
3345 if (AMDGPU_RAS_GET_FEATURES(con->features))
3346 amdgpu_ras_disable_all_features(adev, 1);
3349 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3351 struct amdgpu_ras_block_list *node, *tmp;
3352 struct amdgpu_ras_block_object *obj;
/* the guest side doesn't need to init the ras feature */
3356 if (amdgpu_sriov_vf(adev))
3359 if (amdgpu_aca_is_enabled(adev)) {
3360 if (amdgpu_in_reset(adev))
3361 r = amdgpu_aca_reset(adev);
3363 r = amdgpu_aca_init(adev);
3367 amdgpu_ras_set_aca_debug_mode(adev, false);
3369 amdgpu_ras_set_mca_debug_mode(adev, false);
3372 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3373 obj = node->ras_obj;
3375 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3379 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3382 if (obj->ras_late_init) {
3383 r = obj->ras_late_init(adev, &obj->ras_comm);
3385 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3386 obj->ras_comm.name, r);
3390 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
/* do some fini work before IP fini as a dependency */
3397 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3399 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3401 if (!adev->ras_enabled || !con)
/* Need to disable ras on all IPs here before ip [hw/sw]fini */
3406 if (AMDGPU_RAS_GET_FEATURES(con->features))
3407 amdgpu_ras_disable_all_features(adev, 0);
3408 amdgpu_ras_recovery_fini(adev);
3412 int amdgpu_ras_fini(struct amdgpu_device *adev)
3414 struct amdgpu_ras_block_list *ras_node, *tmp;
3415 struct amdgpu_ras_block_object *obj = NULL;
3416 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3418 if (!adev->ras_enabled || !con)
3421 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3422 if (ras_node->ras_obj) {
3423 obj = ras_node->ras_obj;
3424 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3426 obj->ras_fini(adev, &obj->ras_comm);
3428 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3431 /* Clear ras blocks from ras_list and free ras block list node */
3432 list_del(&ras_node->node);
3436 amdgpu_ras_fs_fini(adev);
3437 amdgpu_ras_interrupt_remove_all(adev);
3439 if (amdgpu_aca_is_enabled(adev))
3440 amdgpu_aca_fini(adev);
3442 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
3444 if (AMDGPU_RAS_GET_FEATURES(con->features))
3445 amdgpu_ras_disable_all_features(adev, 0);
3447 cancel_delayed_work_sync(&con->ras_counte_delay_work);
3449 amdgpu_ras_set_context(adev, NULL);
3455 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
3457 struct amdgpu_ras *ras;
3459 ras = amdgpu_ras_get_context(adev);
3463 return atomic_read(&ras->fed);
3466 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
3468 struct amdgpu_ras *ras;
3470 ras = amdgpu_ras_get_context(adev);
3472 atomic_set(&ras->fed, !!status);
3475 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
3477 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
3478 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
dev_info(adev->dev, "uncorrectable hardware error "
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3483 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3484 amdgpu_ras_reset_gpu(adev);
3488 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
3490 if (adev->asic_type == CHIP_VEGA20 &&
3491 adev->pm.fw_version <= 0x283400) {
3492 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3493 amdgpu_ras_intr_triggered();
3499 void amdgpu_release_ras_context(struct amdgpu_device *adev)
3501 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3506 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3507 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3508 amdgpu_ras_set_context(adev, NULL);
3513 #ifdef CONFIG_X86_MCE_AMD
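/* Map an MCE node_id to the CPU-connected amdgpu device on that XGMI node. */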
3514 static struct amdgpu_device *find_adev(uint32_t node_id)
3517 struct amdgpu_device *adev = NULL;
3519 for (i = 0; i < mce_adev_list.num_gpu; i++) {
3520 adev = mce_adev_list.devs[i];
3522 if (adev && adev->gmc.xgmi.connected_to_cpu &&
3523 adev->gmc.xgmi.physical_node_id == node_id)
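/*
 * Helpers to extract the GPU id, UMC instance and channel index
 * fields from an MCA_IPID register value.
 */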
3531 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
3532 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
3533 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3534 #define GPU_ID_OFFSET 8
3536 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3537 unsigned long val, void *data)
3539 struct mce *m = (struct mce *)data;
3540 struct amdgpu_device *adev = NULL;
3541 uint32_t gpu_id = 0;
3542 uint32_t umc_inst = 0, ch_inst = 0;
3545 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
3546 * and error occurred in DramECC (Extended error code = 0) then only
3547 * process the error, else bail out.
3549 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3550 (XEC(m->status, 0x3f) == 0x0)))
/* If it is a correctable error, return. */
3556 if (mce_is_correctable(m))
3560 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3562 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3564 adev = find_adev(gpu_id);
3566 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
/* If it is an uncorrectable error, find out the UMC instance
* and channel index.
*/
3575 umc_inst = GET_UMC_INST(m->ipid);
3576 ch_inst = GET_CHAN_INDEX(m->ipid);
3578 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3581 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3587 static struct notifier_block amdgpu_bad_page_nb = {
3588 .notifier_call = amdgpu_bad_page_notifier,
3589 .priority = MCE_PRIO_UC,
3592 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
/* Add the adev to the mce_adev_list.
* During a mode2 reset, the amdgpu device is temporarily
* removed from the mgpu_info list, which can cause page
* retirement to fail. Use this list instead of mgpu_info
* to find the amdgpu device on which the UMC error was
* reported.
*/
3602 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3605 * Register the x86 notifier only once
3606 * with MCE subsystem.
if (!notifier_registered) {
3609 mce_register_decode_chain(&amdgpu_bad_page_nb);
3610 notifier_registered = true;
3615 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3620 return adev->psp.ras_context.ras;
3623 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3628 adev->psp.ras_context.ras = ras_con;
3632 /* check if ras is supported on block, say, sdma, gfx */
3633 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3637 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3639 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3642 ret = ras && (adev->ras_enabled & (1 << block));
/* For the special ASICs with MEM ECC enabled but SRAM ECC not
* enabled: even if a ras block is not marked in .ras_enabled, it
* can be considered to support the ras function as long as the
* ASIC supports poison mode and the block has a ras configuration.
*/
3651 (block == AMDGPU_RAS_BLOCK__GFX ||
3652 block == AMDGPU_RAS_BLOCK__SDMA ||
3653 block == AMDGPU_RAS_BLOCK__VCN ||
3654 block == AMDGPU_RAS_BLOCK__JPEG) &&
3655 (amdgpu_ras_mask & (1 << block)) &&
3656 amdgpu_ras_is_poison_mode_supported(adev) &&
3657 amdgpu_ras_get_ras_block(adev, block, 0))
3663 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3665 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3667 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3668 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3672 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
3674 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3678 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3680 con->is_aca_debug_mode = enable;
3686 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
3688 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3692 if (amdgpu_aca_is_enabled(adev))
3693 ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
3695 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3697 con->is_aca_debug_mode = enable;
3703 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
3705 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3706 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3707 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3712 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
3713 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
3714 return con->is_aca_debug_mode;
3719 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
3720 unsigned int *error_query_mode)
3722 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3723 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
3724 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
3727 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
3731 if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
3733 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
3735 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
3740 /* Register each ip ras block into amdgpu ras */
3741 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3742 struct amdgpu_ras_block_object *ras_block_obj)
3744 struct amdgpu_ras_block_list *ras_node;
3745 if (!adev || !ras_block_obj)
3748 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3752 INIT_LIST_HEAD(&ras_node->node);
3753 ras_node->ras_obj = ras_block_obj;
3754 list_add_tail(&ras_node->node, &adev->ras_list);
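/*
 * A minimal usage sketch (illustrative only; my_ip_ras is a
 * hypothetical amdgpu_ras_block_object owned by the IP block):
 *
 *	err = amdgpu_ras_register_ras_block(adev, &my_ip_ras);
 *	if (err)
 *		return err;
 *
 * Registration only adds the block to adev->ras_list; the common
 * late-init work is applied later by amdgpu_ras_late_init().
 */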
3759 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3765 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3766 sprintf(err_type_name, "correctable");
3768 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3769 sprintf(err_type_name, "uncorrectable");
3772 sprintf(err_type_name, "unknown");
3777 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3778 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3780 uint32_t *memory_id)
3782 uint32_t err_status_lo_data, err_status_lo_offset;
3787 err_status_lo_offset =
3788 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3789 reg_entry->seg_lo, reg_entry->reg_lo);
3790 err_status_lo_data = RREG32(err_status_lo_offset);
3792 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3793 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3796 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3801 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3802 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3804 unsigned long *err_cnt)
3806 uint32_t err_status_hi_data, err_status_hi_offset;
3811 err_status_hi_offset =
3812 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3813 reg_entry->seg_hi, reg_entry->reg_hi);
3814 err_status_hi_data = RREG32(err_status_hi_offset);
3816 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3817 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3818 /* keep the check here in case we need to refer to the result later */
3819 dev_dbg(adev->dev, "Invalid err_info field\n");
3821 /* read err count */
3822 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
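/*
 * Walk reg_list and, for each entry on the given instance, read the
 * memory_id and error count, accumulate into err_count, and log each
 * non-zero counter, resolving memory_id to a name via mem_list when
 * one is provided.
 */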
3827 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3828 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3829 uint32_t reg_list_size,
3830 const struct amdgpu_ras_memory_id_entry *mem_list,
3831 uint32_t mem_list_size,
3834 unsigned long *err_count)
3837 unsigned long err_cnt;
3838 char err_type_name[16];
3841 for (i = 0; i < reg_list_size; i++) {
3842 /* query memory_id from err_status_lo */
3843 if (!amdgpu_ras_inst_get_memory_id_field(adev, ®_list[i],
3844 instance, &memory_id))
3847 /* query err_cnt from err_status_hi */
3848 if (!amdgpu_ras_inst_get_err_cnt_field(adev, ®_list[i],
3849 instance, &err_cnt) ||
3853 *err_count += err_cnt;
3855 /* log the errors */
3856 amdgpu_ras_get_error_type_name(err_type, err_type_name);
3858 /* memory_list is not supported */
3860 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3861 err_cnt, err_type_name,
3862 reg_list[i].block_name,
3863 instance, memory_id);
3865 for (j = 0; j < mem_list_size; j++) {
3866 if (memory_id == mem_list[j].memory_id) {
3868 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3869 err_cnt, err_type_name,
3870 reg_list[i].block_name,
3871 instance, mem_list[j].name);
3879 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3880 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3881 uint32_t reg_list_size,
3884 uint32_t err_status_lo_offset, err_status_hi_offset;
3887 for (i = 0; i < reg_list_size; i++) {
3888 err_status_lo_offset =
3889 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3890 reg_list[i].seg_lo, reg_list[i].reg_lo);
3891 err_status_hi_offset =
3892 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3893 reg_list[i].seg_hi, reg_list[i].reg_hi);
3894 WREG32(err_status_lo_offset, 0);
3895 WREG32(err_status_hi_offset, 0);
3899 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
3901 memset(err_data, 0, sizeof(*err_data));
3903 INIT_LIST_HEAD(&err_data->err_node_list);
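/*
 * Callers pair amdgpu_ras_error_data_init() with
 * amdgpu_ras_error_data_fini(), e.g.:
 *
 *	struct ras_err_data err_data;
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *	...fill via amdgpu_ras_error_statistic_{ue,ce,de}_count()...
 *	amdgpu_ras_error_data_fini(&err_data);
 */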
3908 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
3913 list_del(&err_node->node);
3917 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
3919 struct ras_err_node *err_node, *tmp;
3921 list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
3922 amdgpu_ras_error_node_release(err_node);
3925 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
3926 struct amdgpu_smuio_mcm_config_info *mcm_info)
3928 struct ras_err_node *err_node;
3929 struct amdgpu_smuio_mcm_config_info *ref_id;
3931 if (!err_data || !mcm_info)
3934 for_each_ras_error(err_node, err_data) {
3935 ref_id = &err_node->err_info.mcm_info;
3937 if (mcm_info->socket_id == ref_id->socket_id &&
3938 mcm_info->die_id == ref_id->die_id)
3945 static struct ras_err_node *amdgpu_ras_error_node_new(void)
3947 struct ras_err_node *err_node;
3949 err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
3953 INIT_LIST_HEAD(&err_node->node);
3958 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
3960 struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
3961 struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
3962 struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
3963 struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
3965 if (unlikely(infoa->socket_id != infob->socket_id))
3966 return infoa->socket_id - infob->socket_id;
3968 return infoa->die_id - infob->die_id;
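/*
 * Find-or-create lookup: return the err_info matching the given
 * socket/die id, allocating a new node when necessary and keeping
 * err_node_list sorted by socket id, then die id.
 */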
3973 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
3974 struct amdgpu_smuio_mcm_config_info *mcm_info)
3976 struct ras_err_node *err_node;
3978 err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
3980 return &err_node->err_info;
3982 err_node = amdgpu_ras_error_node_new();
3986 INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
3988 memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
3990 err_data->err_list_count++;
3991 list_add_tail(&err_node->node, &err_data->err_node_list);
3992 list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
3994 return &err_node->err_info;
3997 void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
3999 struct ras_err_addr *mca_err_addr;
4001 mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
4005 INIT_LIST_HEAD(&mca_err_addr->node);
4007 mca_err_addr->err_status = err_addr->err_status;
4008 mca_err_addr->err_ipid = err_addr->err_ipid;
4009 mca_err_addr->err_addr = err_addr->err_addr;
4011 list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
4014 void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
4016 list_del(&mca_err_addr->node);
4017 kfree(mca_err_addr);
4020 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4021 struct amdgpu_smuio_mcm_config_info *mcm_info,
4022 struct ras_err_addr *err_addr, u64 count)
4024 struct ras_err_info *err_info;
4026 if (!err_data || !mcm_info)
4032 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4036 if (err_addr && err_addr->err_status)
4037 amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4039 err_info->ue_count += count;
4040 err_data->ue_count += count;
4045 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4046 struct amdgpu_smuio_mcm_config_info *mcm_info,
4047 struct ras_err_addr *err_addr, u64 count)
4049 struct ras_err_info *err_info;
4051 if (!err_data || !mcm_info)
4057 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4061 err_info->ce_count += count;
4062 err_data->ce_count += count;
4067 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4068 struct amdgpu_smuio_mcm_config_info *mcm_info,
4069 struct ras_err_addr *err_addr, u64 count)
4071 struct ras_err_info *err_info;
4073 if (!err_data || !mcm_info)
4079 err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4083 if (err_addr && err_addr->err_status)
4084 amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4086 err_info->de_count += count;
4087 err_data->de_count += count;
4092 #define mmMP0_SMN_C2PMSG_92 0x1609C
4093 #define mmMP0_SMN_C2PMSG_126 0x160BE
4094 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4095 u32 instance, u32 boot_error)
4097 u32 socket_id, aid_id, hbm_id;
4101 socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4102 aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4103 hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
/* The SMN addressing pattern on other SOCs could differ from the
* one for aqua_vanjaram; revisit this code if the pattern changes.
* In that case, replace the aqua_vanjaram implementation with a
* more generic helper. */
4109 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4110 aqua_vanjaram_encode_ext_smn_addressing(instance);
4112 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4113 dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
4114 socket_id, aid_id, reg_data);
4116 if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4117 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
4118 socket_id, aid_id, hbm_id);
4120 if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4121 dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
4124 if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4125 dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
4128 if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4129 dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
4132 if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4133 dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
4136 if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4137 dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
4140 if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4141 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
4142 socket_id, aid_id, hbm_id);
4144 if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4145 dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
4146 socket_id, aid_id, hbm_id);
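/*
 * Poll C2PMSG_92 for the steady boot status; if it is not reached
 * within the polling limit, read the boot error code from
 * C2PMSG_126 for the given AID instance.
 */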
4149 static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
4150 u32 instance, u32 *boot_error)
4156 reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4157 aqua_vanjaram_encode_ext_smn_addressing(instance);
4159 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4160 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4161 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
4162 *boot_error = AMDGPU_RAS_BOOT_SUCEESS;
/* The SMN addressing pattern on other SOCs could differ from the
* one for aqua_vanjaram; revisit this code if the pattern changes.
* In that case, replace the aqua_vanjaram implementation with a
* more generic helper. */
4172 reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4173 aqua_vanjaram_encode_ext_smn_addressing(instance);
4175 for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4176 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4177 if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
4178 *boot_error = reg_data;
4184 *boot_error = reg_data;
4188 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4193 for (i = 0; i < num_instances; i++) {
4194 if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
4195 amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);