2 * Copyright 2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
33 #include "amdgpu_ras.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_xgmi.h"
36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
37 #include "nbio_v4_3.h"
39 #include "amdgpu_reset.h"
41 #ifdef CONFIG_X86_MCE_AMD
44 static bool notifier_registered;
46 static const char *RAS_FS_NAME = "ras";
48 const char *ras_error_string[] = {
52 "multi_uncorrectable",
56 const char *ras_block_string[] = {
76 const char *ras_mca_block_string[] = {
83 struct amdgpu_ras_block_list {
85 struct list_head node;
87 struct amdgpu_ras_block_object *ras_obj;
90 const char *get_ras_block_str(struct ras_common_if *ras_block)
95 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
96 return "OUT OF RANGE";
98 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
99 return ras_mca_block_string[ras_block->sub_block_index];
101 return ras_block_string[ras_block->block];
104 #define ras_block_str(_BLOCK_) \
105 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
107 #define ras_err_str(i) (ras_error_string[ffs(i)])
109 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
111 /* inject address is 52 bits */
112 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
114 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
115 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
117 enum amdgpu_ras_retire_page_reservation {
118 AMDGPU_RAS_RETIRE_PAGE_RESERVED,
119 AMDGPU_RAS_RETIRE_PAGE_PENDING,
120 AMDGPU_RAS_RETIRE_PAGE_FAULT,
123 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
125 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
127 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
129 #ifdef CONFIG_X86_MCE_AMD
130 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
131 struct mce_notifier_adev_list {
132 struct amdgpu_device *devs[MAX_GPU_INSTANCE];
135 static struct mce_notifier_adev_list mce_adev_list;
138 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
140 if (adev && amdgpu_ras_get_context(adev))
141 amdgpu_ras_get_context(adev)->error_query_ready = ready;
144 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
146 if (adev && amdgpu_ras_get_context(adev))
147 return amdgpu_ras_get_context(adev)->error_query_ready;
152 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
154 struct ras_err_data err_data = {0, 0, 0, NULL};
155 struct eeprom_table_record err_rec;
157 if ((address >= adev->gmc.mc_vram_size) ||
158 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
160 "RAS WARN: input address 0x%llx is invalid.\n",
165 if (amdgpu_ras_check_bad_page(adev, address)) {
167 "RAS WARN: 0x%llx has already been marked as bad page!\n",
172 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
173 err_data.err_addr = &err_rec;
174 amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
176 if (amdgpu_bad_page_threshold != 0) {
177 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
178 err_data.err_addr_cnt);
179 amdgpu_ras_save_bad_pages(adev, NULL);
182 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
183 dev_warn(adev->dev, "Clear EEPROM:\n");
184 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
189 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
190 size_t size, loff_t *pos)
192 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
193 struct ras_query_if info = {
199 if (amdgpu_ras_query_error_status(obj->adev, &info))
202 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
203 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
204 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
205 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
206 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
209 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
211 "ce", info.ce_count);
216 s = min_t(u64, s, size);
219 if (copy_to_user(buf, &val[*pos], s))
227 static const struct file_operations amdgpu_ras_debugfs_ops = {
228 .owner = THIS_MODULE,
229 .read = amdgpu_ras_debugfs_read,
231 .llseek = default_llseek
234 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
238 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
240 if (strcmp(name, ras_block_string[i]) == 0)
246 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
247 const char __user *buf, size_t size,
248 loff_t *pos, struct ras_debug_if *data)
250 ssize_t s = min_t(u64, 64, size);
258 /* default value is 0 if the mask is not set by user */
259 u32 instance_mask = 0;
265 memset(str, 0, sizeof(str));
266 memset(data, 0, sizeof(*data));
268 if (copy_from_user(str, buf, s))
271 if (sscanf(str, "disable %32s", block_name) == 1)
273 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
275 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
277 else if (strstr(str, "retire_page") != NULL)
279 else if (str[0] && str[1] && str[2] && str[3])
280 /* ascii string, but no command matched. */
285 if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
286 sscanf(str, "%*s %llu", &address) != 1)
290 data->inject.address = address;
295 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
298 data->head.block = block_id;
299 /* only ue and ce errors are supported */
300 if (!memcmp("ue", err, 2))
301 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
302 else if (!memcmp("ce", err, 2))
303 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
310 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
311 &sub_block, &address, &value, &instance_mask) != 4 &&
312 sscanf(str, "%*s %*s %*s %u %llu %llu %u",
313 &sub_block, &address, &value, &instance_mask) != 4 &&
314 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
315 &sub_block, &address, &value) != 3 &&
316 sscanf(str, "%*s %*s %*s %u %llu %llu",
317 &sub_block, &address, &value) != 3)
319 data->head.sub_block_index = sub_block;
320 data->inject.address = address;
321 data->inject.value = value;
322 data->inject.instance_mask = instance_mask;
325 if (size < sizeof(*data))
328 if (copy_from_user(data, buf, sizeof(*data)))
335 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
336 struct ras_debug_if *data)
338 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
339 uint32_t mask, inst_mask = data->inject.instance_mask;
341 /* no need to set instance mask if there is only one instance */
342 if (num_xcc <= 1 && inst_mask) {
343 data->inject.instance_mask = 0;
345 "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
351 switch (data->head.block) {
352 case AMDGPU_RAS_BLOCK__GFX:
353 mask = GENMASK(num_xcc - 1, 0);
355 case AMDGPU_RAS_BLOCK__SDMA:
356 mask = GENMASK(adev->sdma.num_instances - 1, 0);
358 case AMDGPU_RAS_BLOCK__VCN:
359 case AMDGPU_RAS_BLOCK__JPEG:
360 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
367 /* remove invalid bits in instance mask */
368 data->inject.instance_mask &= mask;
369 if (inst_mask != data->inject.instance_mask)
371 "Adjust RAS inject mask 0x%x to 0x%x\n",
372 inst_mask, data->inject.instance_mask);
376 * DOC: AMDGPU RAS debugfs control interface
378 * The control interface accepts struct ras_debug_if which has two members.
380 * First member: ras_debug_if::head or ras_debug_if::inject.
382 * head is used to indicate which IP block will be under control.
384 * head has four members, they are block, type, sub_block_index, name.
385 * block: which IP will be under control.
386 * type: what kind of error will be enabled/disabled/injected.
387 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
388 * name: the name of IP.
390 * inject has three more members than head, they are address, value and mask.
391 * As their names indicate, inject operation will write the
392 * value to the address.
394 * The second member: struct ras_debug_if::op.
395 * It has three kinds of operations.
397 * - 0: disable RAS on the block. Take ::head as its data.
398 * - 1: enable RAS on the block. Take ::head as its data.
399 * - 2: inject errors on the block. Take ::inject as its data.
401 * How to use the interface?
405 * Copy the struct ras_debug_if in your code and initialize it.
406 * Write the struct to the control interface.
410 * .. code-block:: bash
412 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
413 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
414 * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
416 * Where N is the card you want to affect.
418 * "disable" requires only the block.
419 * "enable" requires the block and error type.
420 * "inject" requires the block, error type, address, and value.
422 * The block is one of: umc, sdma, gfx, etc.
423 * see ras_block_string[] for details
425 * The error type is one of: ue, ce, where,
426 * ue is multi-uncorrectable
427 * ce is single-correctable
429 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
430 * The address and value are hexadecimal numbers, leading 0x is optional.
431 * The mask is the instance mask; it is optional and defaults to 0x1.
435 * .. code-block:: bash
437 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
438 * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
439 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
441 * How to check the result of the operation?
443 * To check disable/enable, see "ras" features at,
444 * /sys/class/drm/card[0/1/2...]/device/ras/features
446 * To check inject, see the corresponding error count at,
447 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
450 * Operations are only allowed on blocks which are supported.
451 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
452 * to see which blocks support RAS on a particular asic.
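 *
 * A minimal user-space sketch of the struct-based path described above
 * (illustrative only; struct ras_debug_if and the block/error enums must be
 * copied from the driver headers into your code, and the debugfs path
 * assumes card 0):
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.op = 2;	/* 2 == inject, see ::op above */
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.inject.address = 0x0;
 *	data.inject.value = 0x0;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &data, sizeof(data));
 *		close(fd);
 *	}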
455 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
456 const char __user *buf,
457 size_t size, loff_t *pos)
459 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
460 struct ras_debug_if data;
463 if (!amdgpu_ras_get_error_query_ready(adev)) {
464 dev_warn(adev->dev, "RAS WARN: error injection "
465 "currently inaccessible\n");
469 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
474 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
481 if (!amdgpu_ras_is_supported(adev, data.head.block))
486 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
489 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
492 if ((data.inject.address >= adev->gmc.mc_vram_size) ||
493 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
494 dev_warn(adev->dev, "RAS WARN: input address "
495 "0x%llx is invalid.",
496 data.inject.address);
501 /* umc ce/ue error injection for a bad page is not allowed */
502 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
503 amdgpu_ras_check_bad_page(adev, data.inject.address)) {
504 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
505 "already been marked as bad!\n",
506 data.inject.address);
510 amdgpu_ras_instance_mask_check(adev, &data);
512 /* data.inject.address is an offset instead of an absolute gpu address */
513 ret = amdgpu_ras_error_inject(adev, &data.inject);
527 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
529 * Some boards contain an EEPROM which is used to persistently store a list of
530 * bad pages which experienced ECC errors in vram. This interface provides
531 * a way to reset the EEPROM, e.g., after testing error injection.
535 * .. code-block:: bash
537 * echo 1 > ../ras/ras_eeprom_reset
539 * will reset EEPROM table to 0 entries.
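 *
 * The table contents can be inspected before and after the reset via the
 * ras_eeprom_table debugfs file created further below (path assumed for
 * card 0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/kernel/debug/dri/0/ras/ras_eeprom_table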
542 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
543 const char __user *buf,
544 size_t size, loff_t *pos)
546 struct amdgpu_device *adev =
547 (struct amdgpu_device *)file_inode(f)->i_private;
550 ret = amdgpu_ras_eeprom_reset_table(
551 &(amdgpu_ras_get_context(adev)->eeprom_control));
554 /* Something was written to EEPROM.
556 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
563 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
564 .owner = THIS_MODULE,
566 .write = amdgpu_ras_debugfs_ctrl_write,
567 .llseek = default_llseek
570 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
571 .owner = THIS_MODULE,
573 .write = amdgpu_ras_debugfs_eeprom_write,
574 .llseek = default_llseek
578 * DOC: AMDGPU RAS sysfs Error Count Interface
580 * It allows the user to read the error count for each IP block on the gpu through
581 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
583 * It outputs multiple lines which report the uncorrected (ue) and corrected
586 * The format of one line is below,
592 * .. code-block:: bash
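 *
 *	ue: 0
 *	ce: 1
 *
 * The counts shown are illustrative; the exact format matches the
 * sysfs_emit() call in amdgpu_ras_sysfs_read() below.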
598 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
599 struct device_attribute *attr, char *buf)
601 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
602 struct ras_query_if info = {
606 if (!amdgpu_ras_get_error_query_ready(obj->adev))
607 return sysfs_emit(buf, "Query currently inaccessible\n");
609 if (amdgpu_ras_query_error_status(obj->adev, &info))
612 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
613 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
614 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
615 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
618 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
619 "ce", info.ce_count);
624 #define get_obj(obj) do { (obj)->use++; } while (0)
625 #define alive_obj(obj) ((obj)->use)
627 static inline void put_obj(struct ras_manager *obj)
629 if (obj && (--obj->use == 0))
630 list_del(&obj->node);
631 if (obj && (obj->use < 0))
632 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
635 /* make one obj and return it. */
636 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
637 struct ras_common_if *head)
639 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
640 struct ras_manager *obj;
642 if (!adev->ras_enabled || !con)
645 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
648 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
649 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
652 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
654 obj = &con->objs[head->block];
656 /* already exists, return obj? */
662 list_add(&obj->node, &con->head);
668 /* return an obj equal to head, or the first when head is NULL */
669 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
670 struct ras_common_if *head)
672 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
673 struct ras_manager *obj;
676 if (!adev->ras_enabled || !con)
680 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
683 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
684 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
687 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
689 obj = &con->objs[head->block];
694 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
705 /* feature ctl begin */
706 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
707 struct ras_common_if *head)
709 return adev->ras_hw_enabled & BIT(head->block);
712 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
713 struct ras_common_if *head)
715 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
717 return con->features & BIT(head->block);
721 * if obj is not created, then create one.
722 * set feature enable flag.
724 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
725 struct ras_common_if *head, int enable)
727 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
728 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
730 /* If hardware does not support ras, then do not create obj.
731 * But if hardware supports ras, we can create the obj.
732 * Ras framework checks con->hw_supported to see if it needs to do
733 * corresponding initialization.
734 * IP checks con->support to see if it needs to disable ras.
736 if (!amdgpu_ras_is_feature_allowed(adev, head))
741 obj = amdgpu_ras_create_obj(adev, head);
745 /* In case we create obj somewhere else */
748 con->features |= BIT(head->block);
750 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
751 con->features &= ~BIT(head->block);
759 static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
760 struct ras_common_if *head)
762 if (amdgpu_ras_is_feature_allowed(adev, head) ||
763 amdgpu_ras_is_poison_mode_supported(adev))
769 /* wrapper of psp_ras_enable_features */
770 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
771 struct ras_common_if *head, bool enable)
773 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
774 union ta_ras_cmd_input *info;
780 if (head->block == AMDGPU_RAS_BLOCK__GFX) {
781 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
786 info->disable_features = (struct ta_ras_disable_features_input) {
787 .block_id = amdgpu_ras_block_to_ta(head->block),
788 .error_type = amdgpu_ras_error_to_ta(head->type),
791 info->enable_features = (struct ta_ras_enable_features_input) {
792 .block_id = amdgpu_ras_block_to_ta(head->block),
793 .error_type = amdgpu_ras_error_to_ta(head->type),
798 /* Do not enable if it is not allowed. */
799 if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
802 /* Only enable ras feature operation handle on host side */
803 if (head->block == AMDGPU_RAS_BLOCK__GFX &&
804 !amdgpu_sriov_vf(adev) &&
805 !amdgpu_ras_intr_triggered()) {
806 ret = psp_ras_enable_features(&adev->psp, info, enable);
808 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
809 enable ? "enable":"disable",
810 get_ras_block_str(head),
811 amdgpu_ras_is_poison_mode_supported(adev), ret);
817 __amdgpu_ras_feature_enable(adev, head, enable);
819 if (head->block == AMDGPU_RAS_BLOCK__GFX)
824 /* Only used in device probe stage and called only once. */
825 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
826 struct ras_common_if *head, bool enable)
828 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
834 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
836 /* There is no harm in issuing a ras TA cmd regardless of
837 * the current ras state.
838 * If current state == target state, it will do nothing.
839 * But sometimes it requests the driver to reset and repost
840 * with error code -EAGAIN.
842 ret = amdgpu_ras_feature_enable(adev, head, 1);
843 /* With old ras TA, we might fail to enable ras.
844 * Log it and just setup the object.
845 * TODO: remove this WA in the future.
847 if (ret == -EINVAL) {
848 ret = __amdgpu_ras_feature_enable(adev, head, 1);
851 "RAS INFO: %s setup object\n",
852 get_ras_block_str(head));
855 /* setup the object, then issue a ras TA disable cmd. */
856 ret = __amdgpu_ras_feature_enable(adev, head, 1);
860 /* gfx block ras disable cmd must be sent to ras-ta */
861 if (head->block == AMDGPU_RAS_BLOCK__GFX)
862 con->features |= BIT(head->block);
864 ret = amdgpu_ras_feature_enable(adev, head, 0);
866 /* clean gfx block ras features flag */
867 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
868 con->features &= ~BIT(head->block);
871 ret = amdgpu_ras_feature_enable(adev, head, enable);
876 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
879 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
880 struct ras_manager *obj, *tmp;
882 list_for_each_entry_safe(obj, tmp, &con->head, node) {
884 * aka just release the obj and corresponding flags
887 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
890 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
895 return con->features;
898 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
901 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
903 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
905 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
906 struct ras_common_if head = {
908 .type = default_ras_type,
909 .sub_block_index = 0,
912 if (i == AMDGPU_RAS_BLOCK__MCA)
917 * bypass psp. vbios enables ras for us,
918 * so just create the obj
920 if (__amdgpu_ras_feature_enable(adev, &head, 1))
923 if (amdgpu_ras_feature_enable(adev, &head, 1))
928 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
929 struct ras_common_if head = {
930 .block = AMDGPU_RAS_BLOCK__MCA,
931 .type = default_ras_type,
932 .sub_block_index = i,
937 * bypass psp. vbios enables ras for us,
938 * so just create the obj
940 if (__amdgpu_ras_feature_enable(adev, &head, 1))
943 if (amdgpu_ras_feature_enable(adev, &head, 1))
948 return con->features;
950 /* feature ctl end */
952 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
953 enum amdgpu_ras_block block)
958 if (block_obj->ras_comm.block == block)
964 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
965 enum amdgpu_ras_block block, uint32_t sub_block_index)
967 struct amdgpu_ras_block_list *node, *tmp;
968 struct amdgpu_ras_block_object *obj;
970 if (block >= AMDGPU_RAS_BLOCK__LAST)
973 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
974 if (!node->ras_obj) {
975 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
980 if (obj->ras_block_match) {
981 if (obj->ras_block_match(obj, block, sub_block_index) == 0)
984 if (amdgpu_ras_block_match_default(obj, block) == 0)
992 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
994 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
998 * choose the right query method according to
999 * whether the smu supports querying error information
1001 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1002 if (ret == -EOPNOTSUPP) {
1003 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1004 adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1005 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1007 /* umc query_ras_error_address is also responsible for clearing
1010 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1011 adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1012 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1014 if (adev->umc.ras &&
1015 adev->umc.ras->ecc_info_query_ras_error_count)
1016 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1018 if (adev->umc.ras &&
1019 adev->umc.ras->ecc_info_query_ras_error_address)
1020 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1024 /* query/inject/cure begin */
1025 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
1026 struct ras_query_if *info)
1028 struct amdgpu_ras_block_object *block_obj = NULL;
1029 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1030 struct ras_err_data err_data = {0, 0, 0, NULL};
1035 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1036 amdgpu_ras_get_ecc_info(adev, &err_data);
1038 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1039 if (!block_obj || !block_obj->hw_ops) {
1040 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1041 get_ras_block_str(&info->head));
1045 if (block_obj->hw_ops->query_ras_error_count)
1046 block_obj->hw_ops->query_ras_error_count(adev, &err_data);
1048 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1049 (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1050 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1051 if (block_obj->hw_ops->query_ras_error_status)
1052 block_obj->hw_ops->query_ras_error_status(adev);
1056 obj->err_data.ue_count += err_data.ue_count;
1057 obj->err_data.ce_count += err_data.ce_count;
1059 info->ue_count = obj->err_data.ue_count;
1060 info->ce_count = obj->err_data.ce_count;
1062 if (err_data.ce_count) {
1063 if (adev->smuio.funcs &&
1064 adev->smuio.funcs->get_socket_id &&
1065 adev->smuio.funcs->get_die_id) {
1066 dev_info(adev->dev, "socket: %d, die: %d "
1067 "%ld correctable hardware errors "
1068 "detected in %s block, no user "
1069 "action is needed.\n",
1070 adev->smuio.funcs->get_socket_id(adev),
1071 adev->smuio.funcs->get_die_id(adev),
1072 obj->err_data.ce_count,
1073 get_ras_block_str(&info->head));
1075 dev_info(adev->dev, "%ld correctable hardware errors "
1076 "detected in %s block, no user "
1077 "action is needed.\n",
1078 obj->err_data.ce_count,
1079 get_ras_block_str(&info->head));
1082 if (err_data.ue_count) {
1083 if (adev->smuio.funcs &&
1084 adev->smuio.funcs->get_socket_id &&
1085 adev->smuio.funcs->get_die_id) {
1086 dev_info(adev->dev, "socket: %d, die: %d "
1087 "%ld uncorrectable hardware errors "
1088 "detected in %s block\n",
1089 adev->smuio.funcs->get_socket_id(adev),
1090 adev->smuio.funcs->get_die_id(adev),
1091 obj->err_data.ue_count,
1092 get_ras_block_str(&info->head));
1094 dev_info(adev->dev, "%ld uncorrectable hardware errors "
1095 "detected in %s block\n",
1096 obj->err_data.ue_count,
1097 get_ras_block_str(&info->head));
1104 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1105 enum amdgpu_ras_block block)
1107 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1109 if (!amdgpu_ras_is_supported(adev, block))
1112 if (!block_obj || !block_obj->hw_ops) {
1113 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1114 ras_block_str(block));
1118 if (block_obj->hw_ops->reset_ras_error_count)
1119 block_obj->hw_ops->reset_ras_error_count(adev);
1121 if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1122 (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1123 if (block_obj->hw_ops->reset_ras_error_status)
1124 block_obj->hw_ops->reset_ras_error_status(adev);
1130 /* wrapper of psp_ras_trigger_error */
1131 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1132 struct ras_inject_if *info)
1134 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1135 struct ta_ras_trigger_error_input block_info = {
1136 .block_id = amdgpu_ras_block_to_ta(info->head.block),
1137 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1138 .sub_block_index = info->head.sub_block_index,
1139 .address = info->address,
1140 .value = info->value,
1143 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1145 info->head.sub_block_index);
1147 /* inject on guest isn't allowed, return success directly */
1148 if (amdgpu_sriov_vf(adev))
1154 if (!block_obj || !block_obj->hw_ops) {
1155 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1156 get_ras_block_str(&info->head));
1160 /* Calculate XGMI relative offset */
1161 if (adev->gmc.xgmi.num_physical_nodes > 1) {
1162 block_info.address =
1163 amdgpu_xgmi_get_relative_phy_addr(adev,
1164 block_info.address);
1167 if (block_obj->hw_ops->ras_error_inject) {
1168 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1169 ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1170 else /* Special ras_error_inject is defined (e.g: xgmi) */
1171 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1172 info->instance_mask);
1175 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1179 dev_err(adev->dev, "ras inject %s failed %d\n",
1180 get_ras_block_str(&info->head), ret);
1186 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1187 * @adev: pointer to AMD GPU device
1188 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1189 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1190 * @query_info: pointer to ras_query_if
1192 * Return 0 if the query succeeds or if there is nothing to do; otherwise return an error
1195 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1196 unsigned long *ce_count,
1197 unsigned long *ue_count,
1198 struct ras_query_if *query_info)
1203 /* do nothing if query_info is not specified */
1206 ret = amdgpu_ras_query_error_status(adev, query_info);
1210 *ce_count += query_info->ce_count;
1211 *ue_count += query_info->ue_count;
1213 /* some hardware/IP supports read to clear
1214 * no need to explicitly reset the err status after the query call */
1215 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1216 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1217 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1219 "Failed to reset error counter and error status\n");
1226 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1227 * @adev: pointer to AMD GPU device
1228 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1229 * @ue_count: pointer to an integer to be set to the count of uncorrectable
1231 * @query_info: pointer to ras_query_if if the query request is only for
1232 * specific ip block; if info is NULL, then the query request is for
1233 * all the ip blocks that support query ras error counters/status
1235 * If set, @ce_count or @ue_count, count and return the corresponding
1236 * error counts in those integer pointers. Return 0 if the device
1237 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
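 *
 * A minimal caller sketch (illustrative):
 *
 *	unsigned long ce_count = 0, ue_count = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
 *		dev_info(adev->dev, "ce: %lu, ue: %lu\n", ce_count, ue_count);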
1239 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1240 unsigned long *ce_count,
1241 unsigned long *ue_count,
1242 struct ras_query_if *query_info)
1244 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1245 struct ras_manager *obj;
1246 unsigned long ce, ue;
1249 if (!adev->ras_enabled || !con)
1252 /* Don't count since no reporting.
1254 if (!ce_count && !ue_count)
1260 /* query all the ip blocks that support ras query interface */
1261 list_for_each_entry(obj, &con->head, node) {
1262 struct ras_query_if info = {
1266 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1269 /* query specific ip block */
1270 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1284 /* query/inject/cure end */
1289 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1290 struct ras_badpage **bps, unsigned int *count);
1292 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1295 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1297 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1299 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1306 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1308 * It allows the user to read the bad pages of vram on the gpu through
1309 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1311 * It outputs multiple lines, and each line stands for one gpu page.
1313 * The format of one line is below,
1314 * gpu pfn : gpu page size : flags
1316 * gpu pfn and gpu page size are printed in hex format.
1317 * flags can be one of the characters below,
1319 * R: reserved, this gpu page is reserved and not able to be used.
1321 * P: pending for reserve, this gpu page is marked as bad and will be reserved
1322 * in the next window of page_reserve.
1324 * F: unable to reserve. this gpu page can't be reserved for some reason.
1328 * .. code-block:: bash
1330 * 0x00000001 : 0x00001000 : R
1331 * 0x00000002 : 0x00001000 : P
1335 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1336 struct kobject *kobj, struct bin_attribute *attr,
1337 char *buf, loff_t ppos, size_t count)
1339 struct amdgpu_ras *con =
1340 container_of(attr, struct amdgpu_ras, badpages_attr);
1341 struct amdgpu_device *adev = con->adev;
1342 const unsigned int element_size =
1343 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1344 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1345 unsigned int end = div64_ul(ppos + count - 1, element_size);
1347 struct ras_badpage *bps = NULL;
1348 unsigned int bps_count = 0;
1350 memset(buf, 0, count);
1352 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1355 for (; start < end && start < bps_count; start++)
1356 s += scnprintf(&buf[s], element_size + 1,
1357 "0x%08x : 0x%08x : %1s\n",
1360 amdgpu_ras_badpage_flags_str(bps[start].flags));
1367 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1368 struct device_attribute *attr, char *buf)
1370 struct amdgpu_ras *con =
1371 container_of(attr, struct amdgpu_ras, features_attr);
1373 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1376 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1378 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1380 sysfs_remove_file_from_group(&adev->dev->kobj,
1381 &con->badpages_attr.attr,
1385 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1387 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1388 struct attribute *attrs[] = {
1389 &con->features_attr.attr,
1392 struct attribute_group group = {
1393 .name = RAS_FS_NAME,
1397 sysfs_remove_group(&adev->dev->kobj, &group);
1402 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1403 struct ras_common_if *head)
1405 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1407 if (!obj || obj->attr_inuse)
1412 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1413 "%s_err_count", head->name);
1415 obj->sysfs_attr = (struct device_attribute){
1417 .name = obj->fs_data.sysfs_name,
1420 .show = amdgpu_ras_sysfs_read,
1422 sysfs_attr_init(&obj->sysfs_attr.attr);
1424 if (sysfs_add_file_to_group(&adev->dev->kobj,
1425 &obj->sysfs_attr.attr,
1431 obj->attr_inuse = 1;
1436 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1437 struct ras_common_if *head)
1439 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1441 if (!obj || !obj->attr_inuse)
1444 sysfs_remove_file_from_group(&adev->dev->kobj,
1445 &obj->sysfs_attr.attr,
1447 obj->attr_inuse = 0;
1453 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1455 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1456 struct ras_manager *obj, *tmp;
1458 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1459 amdgpu_ras_sysfs_remove(adev, &obj->head);
1462 if (amdgpu_bad_page_threshold != 0)
1463 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1465 amdgpu_ras_sysfs_remove_feature_node(adev);
1472 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1474 * Normally when there is an uncorrectable error, the driver will reset
1475 * the GPU to recover. However, in the event of an unrecoverable error,
1476 * the driver provides an interface to reboot the system automatically
1479 * The following file in debugfs provides that interface:
1480 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1484 * .. code-block:: bash
1486 * echo true > .../ras/auto_reboot
1490 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1492 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1493 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1494 struct drm_minor *minor = adev_to_drm(adev)->primary;
1497 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1498 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1499 &amdgpu_ras_debugfs_ctrl_ops);
1500 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1501 &amdgpu_ras_debugfs_eeprom_ops);
1502 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1503 &con->bad_page_cnt_threshold);
1504 debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1505 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1506 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1507 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1508 &amdgpu_ras_debugfs_eeprom_size_ops);
1509 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1511 &amdgpu_ras_debugfs_eeprom_table_ops);
1512 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1515 * After an uncorrectable error happens, GPU recovery will usually
1516 * be scheduled. But due to the known problem of GPU recovery failing
1517 * to bring the GPU back, the interface below provides a direct way for
1518 * the user to have the system rebooted automatically in such a case, when an
1519 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery routine
1520 * will never be called.
1522 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1525 * The user can set this to skip cleaning up hardware's error count registers
1526 * of RAS IPs during ras recovery.
1528 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1529 &con->disable_ras_err_cnt_harvest);
1533 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1534 struct ras_fs_if *head,
1537 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1544 memcpy(obj->fs_data.debugfs_name,
1546 sizeof(obj->fs_data.debugfs_name));
1548 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1549 obj, &amdgpu_ras_debugfs_ops);
1552 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1554 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1556 struct ras_manager *obj;
1557 struct ras_fs_if fs_info;
1560 * it won't be called in the resume path, so no need to check
1561 * suspend and gpu reset status
1563 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1566 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1568 list_for_each_entry(obj, &con->head, node) {
1569 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1570 (obj->attr_inuse == 1)) {
1571 sprintf(fs_info.debugfs_name, "%s_err_inject",
1572 get_ras_block_str(&obj->head));
1573 fs_info.head = obj->head;
1574 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1582 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1583 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1584 static DEVICE_ATTR(features, S_IRUGO,
1585 amdgpu_ras_sysfs_features_read, NULL);
1586 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1588 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1589 struct attribute_group group = {
1590 .name = RAS_FS_NAME,
1592 struct attribute *attrs[] = {
1593 &con->features_attr.attr,
1596 struct bin_attribute *bin_attrs[] = {
1602 /* add features entry */
1603 con->features_attr = dev_attr_features;
1604 group.attrs = attrs;
1605 sysfs_attr_init(attrs[0]);
1607 if (amdgpu_bad_page_threshold != 0) {
1608 /* add bad_page_features entry */
1609 bin_attr_gpu_vram_bad_pages.private = NULL;
1610 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1611 bin_attrs[0] = &con->badpages_attr;
1612 group.bin_attrs = bin_attrs;
1613 sysfs_bin_attr_init(bin_attrs[0]);
1616 r = sysfs_create_group(&adev->dev->kobj, &group);
1618 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1623 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1625 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1626 struct ras_manager *con_obj, *ip_obj, *tmp;
1628 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1629 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1630 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1636 amdgpu_ras_sysfs_remove_all(adev);
1643 /* For the hardware that cannot enable bif ring for both ras_controller_irq
1644 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
1645 * register to check whether the interrupt is triggered or not, and properly
1646 * ack the interrupt if it is there
1648 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1650 /* Fatal error events are handled on host side */
1651 if (amdgpu_sriov_vf(adev))
1654 if (adev->nbio.ras &&
1655 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1656 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1658 if (adev->nbio.ras &&
1659 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1660 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1663 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1664 struct amdgpu_iv_entry *entry)
1666 bool poison_stat = false;
1667 struct amdgpu_device *adev = obj->adev;
1668 struct amdgpu_ras_block_object *block_obj =
1669 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1674 /* both query_poison_status and handle_poison_consumption are optional,
1675 * but at least one of them should be implemented if we need a poison
1676 * consumption handler
1678 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1679 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1681 /* Not poison consumption interrupt, no need to handle it */
1682 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1683 block_obj->ras_comm.name);
1689 amdgpu_umc_poison_handler(adev, false);
1691 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1692 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1694 /* gpu reset is fallback for failed and default cases */
1696 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1697 block_obj->ras_comm.name);
1698 amdgpu_ras_reset_gpu(adev);
1700 amdgpu_gfx_poison_consumption_handler(adev, entry);
1704 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1705 struct amdgpu_iv_entry *entry)
1707 dev_info(obj->adev->dev,
1708 "Poison is created, no user action is needed.\n");
1711 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1712 struct amdgpu_iv_entry *entry)
1714 struct ras_ih_data *data = &obj->ih_data;
1715 struct ras_err_data err_data = {0, 0, 0, NULL};
1721 /* Let IP handle its data; maybe we need to get the output
1722 * from the callback to update the error type/count, etc
1724 ret = data->cb(obj->adev, &err_data, entry);
1725 /* ue will trigger an interrupt, and in that case
1726 * we need to do a reset to recover the whole system.
1727 * But leave the IP to do that recovery; here we just dispatch
1730 if (ret == AMDGPU_RAS_SUCCESS) {
1731 /* these counts could be left as 0 if
1732 * some blocks do not count error numbers
1734 obj->err_data.ue_count += err_data.ue_count;
1735 obj->err_data.ce_count += err_data.ce_count;
1739 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1741 struct ras_ih_data *data = &obj->ih_data;
1742 struct amdgpu_iv_entry entry;
1744 while (data->rptr != data->wptr) {
1746 memcpy(&entry, &data->ring[data->rptr],
1747 data->element_size);
1750 data->rptr = (data->aligned_element_size +
1751 data->rptr) % data->ring_size;
1753 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1754 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1755 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1757 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1759 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1760 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1762 dev_warn(obj->adev->dev,
1763 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1768 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1770 struct ras_ih_data *data =
1771 container_of(work, struct ras_ih_data, ih_work);
1772 struct ras_manager *obj =
1773 container_of(data, struct ras_manager, ih_data);
1775 amdgpu_ras_interrupt_handler(obj);
1778 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1779 struct ras_dispatch_if *info)
1781 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1782 struct ras_ih_data *data = &obj->ih_data;
1787 if (data->inuse == 0)
1790 /* Might be overflow... */
1791 memcpy(&data->ring[data->wptr], info->entry,
1792 data->element_size);
1795 data->wptr = (data->aligned_element_size +
1796 data->wptr) % data->ring_size;
1798 schedule_work(&data->ih_work);
1803 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1804 struct ras_common_if *head)
1806 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1807 struct ras_ih_data *data;
1812 data = &obj->ih_data;
1813 if (data->inuse == 0)
1816 cancel_work_sync(&data->ih_work);
1819 memset(data, 0, sizeof(*data));
1825 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1826 struct ras_common_if *head)
1828 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1829 struct ras_ih_data *data;
1830 struct amdgpu_ras_block_object *ras_obj;
1833 /* in case we register the IH before enabling the ras feature */
1834 obj = amdgpu_ras_create_obj(adev, head);
1840 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1842 data = &obj->ih_data;
1843 /* add the callback, etc. */
1844 *data = (struct ras_ih_data) {
1846 .cb = ras_obj->ras_cb,
1847 .element_size = sizeof(struct amdgpu_iv_entry),
1852 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1854 data->aligned_element_size = ALIGN(data->element_size, 8);
1855 /* the ring can store 64 iv entries. */
1856 data->ring_size = 64 * data->aligned_element_size;
1857 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1869 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1871 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1872 struct ras_manager *obj, *tmp;
1874 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1875 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1882 /* traverse all IPs except NBIO to query error counters */
1883 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1885 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1886 struct ras_manager *obj;
1888 if (!adev->ras_enabled || !con)
1891 list_for_each_entry(obj, &con->head, node) {
1892 struct ras_query_if info = {
1897 * PCIE_BIF IP has a different isr for the ras controller
1898 * interrupt; the specific ras counter query will be
1899 * done in that isr. So skip such blocks from the common
1900 * sync flood interrupt isr calling.
1902 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1906 * this is a workaround for aldebaran: skip sending a msg to
1907 * smu to get the ecc_info table, since smu handling of the
1908 * ecc_info table fails temporarily.
1909 * should be removed once smu fixes handling of the ecc_info table.
1911 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1912 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1915 amdgpu_ras_query_error_status(adev, &info);
1917 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1918 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1919 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1920 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1921 dev_warn(adev->dev, "Failed to reset error counter and error status");
1926 /* Parse RdRspStatus and WrRspStatus */
1927 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1928 struct ras_query_if *info)
1930 struct amdgpu_ras_block_object *block_obj;
1932 * Only two blocks need to query read/write
1933 * RspStatus at the current state
1935 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1936 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1939 block_obj = amdgpu_ras_get_ras_block(adev,
1941 info->head.sub_block_index);
1943 if (!block_obj || !block_obj->hw_ops) {
1944 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1945 get_ras_block_str(&info->head));
1949 if (block_obj->hw_ops->query_ras_error_status)
1950 block_obj->hw_ops->query_ras_error_status(adev);
1954 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1956 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1957 struct ras_manager *obj;
1959 if (!adev->ras_enabled || !con)
1962 list_for_each_entry(obj, &con->head, node) {
1963 struct ras_query_if info = {
1967 amdgpu_ras_error_status_query(adev, &info);
1971 /* recovery begin */
1973 /* return 0 on success.
1974 * caller needs to free bps.
1976 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1977 struct ras_badpage **bps, unsigned int *count)
1979 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1980 struct ras_err_handler_data *data;
1982 int ret = 0, status;
1984 if (!con || !con->eh_data || !bps || !count)
1987 mutex_lock(&con->recovery_lock);
1988 data = con->eh_data;
1989 if (!data || data->count == 0) {
1995 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2001 for (; i < data->count; i++) {
2002 (*bps)[i] = (struct ras_badpage){
2003 .bp = data->bps[i].retired_page,
2004 .size = AMDGPU_GPU_PAGE_SIZE,
2005 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2007 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2008 data->bps[i].retired_page);
2009 if (status == -EBUSY)
2010 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2011 else if (status == -ENOENT)
2012 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2015 *count = data->count;
2017 mutex_unlock(&con->recovery_lock);
2021 static void amdgpu_ras_do_recovery(struct work_struct *work)
2023 struct amdgpu_ras *ras =
2024 container_of(work, struct amdgpu_ras, recovery_work);
2025 struct amdgpu_device *remote_adev = NULL;
2026 struct amdgpu_device *adev = ras->adev;
2027 struct list_head device_list, *device_list_handle = NULL;
2029 if (!ras->disable_ras_err_cnt_harvest) {
2030 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2032 /* Build list of devices to query RAS related errors */
2033 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2034 device_list_handle = &hive->device_list;
2036 INIT_LIST_HEAD(&device_list);
2037 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2038 device_list_handle = &device_list;
2041 list_for_each_entry(remote_adev,
2042 device_list_handle, gmc.xgmi.head) {
2043 amdgpu_ras_query_err_status(remote_adev);
2044 amdgpu_ras_log_on_err_counter(remote_adev);
2047 amdgpu_put_xgmi_hive(hive);
2050 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2051 struct amdgpu_reset_context reset_context;
2052 memset(&reset_context, 0, sizeof(reset_context));
2054 reset_context.method = AMD_RESET_METHOD_NONE;
2055 reset_context.reset_req_dev = adev;
2057 /* Perform full reset in fatal error mode */
2058 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2059 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2061 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2063 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2064 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2065 reset_context.method = AMD_RESET_METHOD_MODE2;
2069 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2071 atomic_set(&ras->in_recovery, 0);
2074 /* alloc/realloc bps array */
2075 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2076 struct ras_err_handler_data *data, int pages)
2078 unsigned int old_space = data->count + data->space_left;
2079 unsigned int new_space = old_space + pages;
2080 unsigned int align_space = ALIGN(new_space, 512);
2081 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2088 memcpy(bps, data->bps,
2089 data->count * sizeof(*data->bps));
2094 data->space_left += align_space - old_space;
2098 /* it deals with vram only. */
2099 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2100 struct eeprom_table_record *bps, int pages)
2102 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2103 struct ras_err_handler_data *data;
2107 if (!con || !con->eh_data || !bps || pages <= 0)
2110 mutex_lock(&con->recovery_lock);
2111 data = con->eh_data;
2115 for (i = 0; i < pages; i++) {
2116 if (amdgpu_ras_check_bad_page_unlock(con,
2117 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2120 if (!data->space_left &&
2121 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2126 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2127 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2128 AMDGPU_GPU_PAGE_SIZE);
2130 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2135 mutex_unlock(&con->recovery_lock);
2141 * write error record array to eeprom, the function should be
2142 * protected by recovery_lock
2143 * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2145 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2146 unsigned long *new_cnt)
2148 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2149 struct ras_err_handler_data *data;
2150 struct amdgpu_ras_eeprom_control *control;
2153 if (!con || !con->eh_data) {
2160 mutex_lock(&con->recovery_lock);
2161 control = &con->eeprom_control;
2162 data = con->eh_data;
2163 save_count = data->count - control->ras_num_recs;
2164 mutex_unlock(&con->recovery_lock);
2167 *new_cnt = save_count / adev->umc.retire_unit;
2169 /* only new entries are saved */
2170 if (save_count > 0) {
2171 if (amdgpu_ras_eeprom_append(control,
2172 &data->bps[control->ras_num_recs],
2174 dev_err(adev->dev, "Failed to save EEPROM table data!");
2178 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2185 * read error record array in eeprom and reserve enough space for
2186 * storing new bad pages
2188 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2190 struct amdgpu_ras_eeprom_control *control =
2191 &adev->psp.ras_context.ras->eeprom_control;
2192 struct eeprom_table_record *bps;
2195 /* no bad page record, skip eeprom access */
2196 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2199 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2203 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2205 dev_err(adev->dev, "Failed to load EEPROM table records!");
2207 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2213 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2216 struct ras_err_handler_data *data = con->eh_data;
2219 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2220 for (i = 0; i < data->count; i++)
2221 if (addr == data->bps[i].retired_page)
2228 * check if an address belongs to a bad page
2230 * Note: this check is only for umc block
2232 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2235 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2238 if (!con || !con->eh_data)
2241 mutex_lock(&con->recovery_lock);
2242 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2243 mutex_unlock(&con->recovery_lock);
2247 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2250 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2253 * Justification of value bad_page_cnt_threshold in ras structure
2255 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2256 * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
2257 * scenarios accordingly.
2259 * Bad page retirement enablement:
2260 * - If amdgpu_bad_page_threshold = -2,
2261 * bad_page_cnt_threshold = typical value by formula.
2263 * - When the value from user is 0 < amdgpu_bad_page_threshold <
2264 * max record length in eeprom, use it directly.
2266 * Bad page retirement disablement:
2267 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2268 * functionality is disabled, and bad_page_cnt_threshold will take no effect.
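 *
 * As an illustrative example of the formula path: with 16 GiB of VRAM
 * and RAS_BAD_PAGE_COVER at 100 MiB, this computes
 * 16384 MiB / 100 MiB = 163 (integer division) as the default
 * threshold, subject to the max_count cap below.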
2272 if (amdgpu_bad_page_threshold < 0) {
2273 u64 val = adev->gmc.mc_vram_size;
2275 do_div(val, RAS_BAD_PAGE_COVER);
2276 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2279 con->bad_page_cnt_threshold = min_t(int, max_count,
2280 amdgpu_bad_page_threshold);
2284 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2286 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2287 struct ras_err_handler_data **data;
2288 u32 max_eeprom_records_count = 0;
2289 bool exc_err_limit = false;
2292 if (!con || amdgpu_sriov_vf(adev))
2295 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2296 * supports RAS and debugfs is enabled, but when
2297 * adev->ras_enabled is unset, i.e. when "ras_enable"
2298 * module parameter is set to 0.
2302 if (!adev->ras_enabled)
2305 data = &con->eh_data;
2306 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2312 mutex_init(&con->recovery_lock);
2313 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2314 atomic_set(&con->in_recovery, 0);
2315 con->eeprom_control.bad_channel_bitmap = 0;
2317 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2318 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2320 /* Todo: during testing, the SMU might fail to read the eeprom through I2C
2321 * when the GPU is pending on an XGMI reset at probe time
2322 * (mostly after the second bus reset); skip it for now
2324 if (adev->gmc.xgmi.pending_reset)
2326 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2328 * This call fails when exc_err_limit is true or ret != 0.
2331 if (exc_err_limit || ret)
2334 if (con->eeprom_control.ras_num_recs) {
2335 ret = amdgpu_ras_load_bad_pages(adev);
2339 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2341 if (con->update_channel_flag) {
2342 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2343 con->update_channel_flag = false;
2347 #ifdef CONFIG_X86_MCE_AMD
2348 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2349 (adev->gmc.xgmi.connected_to_cpu))
2350 amdgpu_register_bad_pages_mca_notifier(adev);
2355 kfree((*data)->bps);
2357 con->eh_data = NULL;
2359 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2362 * Except for the error-threshold-exceeded case, other failures in this
2363 * function do not fail amdgpu driver init.
2373 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2375 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2376 struct ras_err_handler_data *data = con->eh_data;
2378 /* if recovery_init failed to init it, fini is useless */
2382 cancel_work_sync(&con->recovery_work);
2384 mutex_lock(&con->recovery_lock);
2385 con->eh_data = NULL;
2388 mutex_unlock(&con->recovery_lock);
2394 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2396 if (amdgpu_sriov_vf(adev)) {
2397 switch (adev->ip_versions[MP0_HWIP][0]) {
2398 case IP_VERSION(13, 0, 2):
2405 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2406 switch (adev->ip_versions[MP0_HWIP][0]) {
2407 case IP_VERSION(13, 0, 0):
2408 case IP_VERSION(13, 0, 10):
2415 return adev->asic_type == CHIP_VEGA10 ||
2416 adev->asic_type == CHIP_VEGA20 ||
2417 adev->asic_type == CHIP_ARCTURUS ||
2418 adev->asic_type == CHIP_ALDEBARAN ||
2419 adev->asic_type == CHIP_SIENNA_CICHLID;
2423 * this is a workaround for the vega20 workstation sku:
2424 * force enable gfx ras and ignore the vbios gfx ras flag,
2425 * since GC EDC cannot be written
2427 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2429 struct atom_context *ctx = adev->mode_info.atom_context;
2434 if (strnstr(ctx->vbios_version, "D16406",
2435 sizeof(ctx->vbios_version)) ||
2436 strnstr(ctx->vbios_version, "D36002",
2437 sizeof(ctx->vbios_version)))
2438 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2442 * check the hardware's ras ability, which will be saved in hw_supported.
2443 * if the hardware does not support ras, we can skip some ras initialization and
2444 * forbid some ras operations from IPs.
2445 * if software itself, say a boot parameter, limits the ras ability, we still
2446 * need to allow IPs to do some limited operations, like disable. In such a case,
2447 * we have to initialize ras as normal, but must check whether an operation is
2448 * allowed in each function.
2450 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2452 adev->ras_hw_enabled = adev->ras_enabled = 0;
2454 if (!amdgpu_ras_asic_supported(adev))
2457 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
2458 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2459 dev_info(adev->dev, "MEM ECC is active.\n");
2460 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2461 1 << AMDGPU_RAS_BLOCK__DF);
2463 dev_info(adev->dev, "MEM ECC is not present.\n");
2466 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2467 dev_info(adev->dev, "SRAM ECC is active.\n");
2468 if (!amdgpu_sriov_vf(adev))
2469 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2470 1 << AMDGPU_RAS_BLOCK__DF);
2472 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2473 1 << AMDGPU_RAS_BLOCK__SDMA |
2474 1 << AMDGPU_RAS_BLOCK__GFX);
2476 /* VCN/JPEG RAS can be supported in both bare metal and SRIOV environments
2479 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2480 adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2481 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2482 1 << AMDGPU_RAS_BLOCK__JPEG);
2484 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2485 1 << AMDGPU_RAS_BLOCK__JPEG);
2488 * XGMI RAS is not supported if the xgmi num physical nodes is zero
2491 if (!adev->gmc.xgmi.num_physical_nodes)
2492 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2494 dev_info(adev->dev, "SRAM ECC is not present.\n");
2497 /* driver only manages the RAS feature of a few IP blocks
2498 * when the GPU is connected to the CPU through XGMI */
2499 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2500 1 << AMDGPU_RAS_BLOCK__SDMA |
2501 1 << AMDGPU_RAS_BLOCK__MMHUB);
2504 amdgpu_ras_get_quirks(adev);
2506 /* hw_supported needs to be aligned with RAS block mask. */
2507 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2509 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2510 adev->ras_hw_enabled & amdgpu_ras_mask;
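/* For instance (hypothetical mask values): with ras_hw_enabled = 0x1ed
 * and the boot parameter amdgpu.ras_mask=0x1, only bit 0 would remain
 * set in ras_enabled (the umc block, assuming AMDGPU_RAS_BLOCK__UMC
 * is bit 0); amdgpu_ras_enable == 0 clears ras_enabled entirely.
 */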
2513 static void amdgpu_ras_counte_dw(struct work_struct *work)
2515 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2516 ras_counte_delay_work.work);
2517 struct amdgpu_device *adev = con->adev;
2518 struct drm_device *dev = adev_to_drm(adev);
2519 unsigned long ce_count, ue_count;
2522 res = pm_runtime_get_sync(dev->dev);
2526 /* Cache new values.
2528 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2529 atomic_set(&con->ras_ce_count, ce_count);
2530 atomic_set(&con->ras_ue_count, ue_count);
2533 pm_runtime_mark_last_busy(dev->dev);
2535 pm_runtime_put_autosuspend(dev->dev);
2538 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2540 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2541 bool df_poison, umc_poison;
2543 /* poison setting is useless on SRIOV guest */
2544 if (amdgpu_sriov_vf(adev) || !con)
2547 /* Init the poison supported flag; the default value is false */
2548 if (adev->gmc.xgmi.connected_to_cpu) {
2549 /* enabled by default when GPU is connected to CPU */
2550 con->poison_supported = true;
2551 } else if (adev->df.funcs &&
2552 adev->df.funcs->query_ras_poison_mode &&
2554 adev->umc.ras->query_ras_poison_mode) {
2556 adev->df.funcs->query_ras_poison_mode(adev);
2558 adev->umc.ras->query_ras_poison_mode(adev);
2560 /* Only when poison is set in both DF and UMC can we support it */
2561 if (df_poison && umc_poison)
2562 con->poison_supported = true;
2563 else if (df_poison != umc_poison)
2565 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2566 df_poison, umc_poison);
2570 int amdgpu_ras_init(struct amdgpu_device *adev)
2572 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2578 con = kmalloc(sizeof(struct amdgpu_ras) +
2579 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2580 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2581 GFP_KERNEL|__GFP_ZERO);
2586 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2587 atomic_set(&con->ras_ce_count, 0);
2588 atomic_set(&con->ras_ue_count, 0);
2590 con->objs = (struct ras_manager *)(con + 1);
2592 amdgpu_ras_set_context(adev, con);
2594 amdgpu_ras_check_supported(adev);
2596 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2597 /* set the gfx block ras context feature for VEGA20 Gaming,
2598 * to send the ras disable cmd to the ras ta during ras late init.
2600 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2601 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2610 con->update_channel_flag = false;
2612 INIT_LIST_HEAD(&con->head);
2613 /* Might need to get this flag from vbios. */
2614 con->flags = RAS_DEFAULT_FLAGS;
2616 /* initialize the nbio ras function ahead of any other
2617 * ras functions so the hardware fatal error interrupt
2618 * can be enabled as early as possible */
2619 switch (adev->ip_versions[NBIO_HWIP][0]) {
2620 case IP_VERSION(7, 4, 0):
2621 case IP_VERSION(7, 4, 1):
2622 case IP_VERSION(7, 4, 4):
2623 if (!adev->gmc.xgmi.connected_to_cpu)
2624 adev->nbio.ras = &nbio_v7_4_ras;
2626 case IP_VERSION(4, 3, 0):
2627 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2628 /* unlike other generations of nbio ras,
2629 * nbio v4_3 only supports the fatal error interrupt,
2630 * which informs software that DF is frozen due to a
2631 * system fatal error event. the driver should not
2632 * enable nbio ras in such a case. Instead, check DF RAS.
2634 adev->nbio.ras = &nbio_v4_3_ras;
2637 /* nbio ras is not available */
2641 /* the nbio ras block needs to be enabled ahead of other ras blocks
2642 * to handle fatal errors */
2643 r = amdgpu_nbio_ras_sw_init(adev);
2647 if (adev->nbio.ras &&
2648 adev->nbio.ras->init_ras_controller_interrupt) {
2649 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2654 if (adev->nbio.ras &&
2655 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2656 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2661 amdgpu_ras_query_poison_mode(adev);
2663 if (amdgpu_ras_fs_init(adev)) {
2668 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2669 "hardware ability[%x] ras_mask[%x]\n",
2670 adev->ras_hw_enabled, adev->ras_enabled);
2674 amdgpu_ras_set_context(adev, NULL);
2680 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2682 if (adev->gmc.xgmi.connected_to_cpu ||
2683 adev->gmc.is_app_apu)
2688 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2689 struct ras_common_if *ras_block)
2691 struct ras_query_if info = {
2695 if (!amdgpu_persistent_edc_harvesting_supported(adev))
2698 if (amdgpu_ras_query_error_status(adev, &info) != 0)
2699 DRM_WARN("RAS init harvest failure");
2701 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2702 DRM_WARN("RAS init harvest reset failure");
2707 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2709 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2714 return con->poison_supported;
2717 /* helper function to handle common stuff in ip late init phase */
2718 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2719 struct ras_common_if *ras_block)
2721 struct amdgpu_ras_block_object *ras_obj = NULL;
2722 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2723 struct ras_query_if *query_info;
2724 unsigned long ue_count, ce_count;
2727 /* disable RAS feature per IP block if it is not supported */
2728 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2729 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2733 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2735 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2736 /* in the resume phase, if we fail to enable ras,
2737 * clean up all ras fs nodes and disable ras */
2743 /* check for errors on warm reset on ASICs that support persistent EDC harvesting */
2744 amdgpu_persistent_edc_harvesting(adev, ras_block);
2746 /* in resume phase, no need to create ras fs node */
2747 if (adev->in_suspend || amdgpu_in_reset(adev))
2750 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2751 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2752 (ras_obj->hw_ops->query_poison_status ||
2753 ras_obj->hw_ops->handle_poison_consumption))) {
2754 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2759 r = amdgpu_ras_sysfs_create(adev, ras_block);
2763 /* Those are the cached values at init.
2765 query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
2768 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2770 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2771 atomic_set(&con->ras_ce_count, ce_count);
2772 atomic_set(&con->ras_ue_count, ue_count);
2779 if (ras_obj->ras_cb)
2780 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2782 amdgpu_ras_feature_enable(adev, ras_block, 0);
2786 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2787 struct ras_common_if *ras_block)
2789 return amdgpu_ras_block_late_init(adev, ras_block);
2792 /* helper function to remove ras fs node and interrupt handler */
2793 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2794 struct ras_common_if *ras_block)
2796 struct amdgpu_ras_block_object *ras_obj;
2800 amdgpu_ras_sysfs_remove(adev, ras_block);
2802 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2803 if (ras_obj->ras_cb)
2804 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2807 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2808 struct ras_common_if *ras_block)
2810 return amdgpu_ras_block_late_fini(adev, ras_block);
2813 /* do some init work after IP late init, as a dependency;
2814 * it runs in the resume/gpu reset/boot-up cases.
2816 void amdgpu_ras_resume(struct amdgpu_device *adev)
2818 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2819 struct ras_manager *obj, *tmp;
2821 if (!adev->ras_enabled || !con) {
2822 /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2823 amdgpu_release_ras_context(adev);
2828 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2829 /* Set up all other IPs which are not implemented. One tricky
2830 * point: an IP's actual ras error type should be
2831 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2832 * ERROR_NONE makes sense anyway.
2834 amdgpu_ras_enable_all_features(adev, 1);
2836 /* We enable ras on all hw_supported blocks, but the boot
2837 * parameter might disable some of them, and one or more IPs may
2838 * not be implemented yet. So we disable them on their behalf.
2840 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2841 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2842 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2843 /* there should not be any reference. */
2844 WARN_ON(alive_obj(obj));
2850 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2852 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2854 if (!adev->ras_enabled || !con)
2857 amdgpu_ras_disable_all_features(adev, 0);
2858 /* Make sure all ras objects are disabled. */
2860 amdgpu_ras_disable_all_features(adev, 1);
2863 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2865 struct amdgpu_ras_block_list *node, *tmp;
2866 struct amdgpu_ras_block_object *obj;
2869 /* The guest side doesn't need to init the ras feature */
2870 if (amdgpu_sriov_vf(adev))
2873 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2874 if (!node->ras_obj) {
2875 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2879 obj = node->ras_obj;
2880 if (obj->ras_late_init) {
2881 r = obj->ras_late_init(adev, &obj->ras_comm);
2883 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2884 obj->ras_comm.name, r);
2888 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2894 /* do some fini work before IP fini, as a dependency */
2895 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2897 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2899 if (!adev->ras_enabled || !con)
2903 /* Need to disable ras on all IPs here before ip [hw/sw]fini */
2905 amdgpu_ras_disable_all_features(adev, 0);
2906 amdgpu_ras_recovery_fini(adev);
2910 int amdgpu_ras_fini(struct amdgpu_device *adev)
2912 struct amdgpu_ras_block_list *ras_node, *tmp;
2913 struct amdgpu_ras_block_object *obj = NULL;
2914 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2916 if (!adev->ras_enabled || !con)
2919 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2920 if (ras_node->ras_obj) {
2921 obj = ras_node->ras_obj;
2922 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2924 obj->ras_fini(adev, &obj->ras_comm);
2926 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2929 /* Clear ras blocks from ras_list and free ras block list node */
2930 list_del(&ras_node->node);
2934 amdgpu_ras_fs_fini(adev);
2935 amdgpu_ras_interrupt_remove_all(adev);
2937 WARN(con->features, "Feature mask is not cleared");
2940 amdgpu_ras_disable_all_features(adev, 1);
2942 cancel_delayed_work_sync(&con->ras_counte_delay_work);
2944 amdgpu_ras_set_context(adev, NULL);
2950 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2952 amdgpu_ras_check_supported(adev);
2953 if (!adev->ras_hw_enabled)
2956 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2957 dev_info(adev->dev, "uncorrectable hardware error "
2958 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2960 amdgpu_ras_reset_gpu(adev);
2964 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2966 if (adev->asic_type == CHIP_VEGA20 &&
2967 adev->pm.fw_version <= 0x283400) {
2968 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2969 amdgpu_ras_intr_triggered();
2975 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2977 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2982 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2983 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2984 amdgpu_ras_set_context(adev, NULL);
2989 #ifdef CONFIG_X86_MCE_AMD
2990 static struct amdgpu_device *find_adev(uint32_t node_id)
2993 struct amdgpu_device *adev = NULL;
2995 for (i = 0; i < mce_adev_list.num_gpu; i++) {
2996 adev = mce_adev_list.devs[i];
2998 if (adev && adev->gmc.xgmi.connected_to_cpu &&
2999 adev->gmc.xgmi.physical_node_id == node_id)
3007 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
3008 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
3009 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3010 #define GPU_ID_OFFSET 8
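/* A worked decode of the macros above, using a hypothetical
 * MCA_IPID value: for
 * m->ipid = (9ULL << 44) | (1ULL << 21) | (1ULL << 20) | (2ULL << 12),
 * GET_MCA_IPID_GPUID() yields 9, so gpu_id = 9 - GPU_ID_OFFSET = 1;
 * GET_UMC_INST() yields 1; and GET_CHAN_INDEX() combines bits [13:12]
 * with bit 20 (shifted down into bit 2) to yield 0x2 | 0x4 = 6.
 */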
3012 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3013 unsigned long val, void *data)
3015 struct mce *m = (struct mce *)data;
3016 struct amdgpu_device *adev = NULL;
3017 uint32_t gpu_id = 0;
3018 uint32_t umc_inst = 0, ch_inst = 0;
3021 * Only process the error if it was generated in UMC_V2, which
3022 * belongs to the GPU UMCs, and it occurred in DramECC
3023 * (Extended error code = 0); otherwise bail out.
3025 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3026 (XEC(m->status, 0x3f) == 0x0)))
3030 * If it is a correctable error, return.
3032 if (mce_is_correctable(m))
3036 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3038 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3040 adev = find_adev(gpu_id);
3042 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3048 * If it is an uncorrectable error, find out the UMC instance and channel index.
3051 umc_inst = GET_UMC_INST(m->ipid);
3052 ch_inst = GET_CHAN_INDEX(m->ipid);
3054 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
3057 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3063 static struct notifier_block amdgpu_bad_page_nb = {
3064 .notifier_call = amdgpu_bad_page_notifier,
3065 .priority = MCE_PRIO_UC,
3068 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3071 * Add the adev to the mce_adev_list.
3072 * During mode2 reset, the amdgpu device is temporarily
3073 * removed from the mgpu_info list, which can cause
3074 * page retirement to fail.
3075 * Use this list instead of mgpu_info to find the amdgpu
3076 * device on which the UMC error was reported.
3078 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3081 * Register the x86 notifier only once
3082 * with MCE subsystem.
3084 if (!notifier_registered) {
3085 mce_register_decode_chain(&amdgpu_bad_page_nb);
3086 notifier_registered = true;
3091 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3096 return adev->psp.ras_context.ras;
3099 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3104 adev->psp.ras_context.ras = ras_con;
3108 /* check whether ras is supported on a block, say, sdma or gfx */
3109 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3113 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3115 if (block >= AMDGPU_RAS_BLOCK_COUNT)
3118 ret = ras && (adev->ras_enabled & (1 << block));
3120 /* For a special asic with mem ecc enabled but sram ecc
3121 * not enabled, even if the ras block is not supported in
3122 * .ras_enabled, if the asic supports poison mode and the
3123 * ras block has a ras configuration, it can be considered
3124 * that the ras block supports the ras function.
3127 amdgpu_ras_is_poison_mode_supported(adev) &&
3128 amdgpu_ras_get_ras_block(adev, block, 0))
3134 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3136 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3138 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3139 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3144 /* Register each ip ras block into amdgpu ras */
3145 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3146 struct amdgpu_ras_block_object *ras_block_obj)
3148 struct amdgpu_ras_block_list *ras_node;
3149 if (!adev || !ras_block_obj)
3152 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3156 INIT_LIST_HEAD(&ras_node->node);
3157 ras_node->ras_obj = ras_block_obj;
3158 list_add_tail(&ras_node->node, &adev->ras_list);
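/* An illustrative caller, from an IP block's sw_init, where gfx_ras
 * is that block's ras container holding an amdgpu_ras_block_object
 * (the names here are for illustration):
 *
 *     r = amdgpu_ras_register_ras_block(adev, &gfx_ras->ras_block);
 *     if (r)
 *             return r;
 *
 * The list node is released in amdgpu_ras_fini() when the block is
 * removed from adev->ras_list.
 */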
3163 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3169 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3170 sprintf(err_type_name, "correctable");
3172 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3173 sprintf(err_type_name, "uncorrectable");
3176 sprintf(err_type_name, "unknown");
3181 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3182 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3184 uint32_t *memory_id)
3186 uint32_t err_status_lo_data, err_status_lo_offset;
3191 err_status_lo_offset =
3192 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3193 reg_entry->seg_lo, reg_entry->reg_lo);
3194 err_status_lo_data = RREG32(err_status_lo_offset);
3196 if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3197 !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3200 *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3205 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3206 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3208 unsigned long *err_cnt)
3210 uint32_t err_status_hi_data, err_status_hi_offset;
3215 err_status_hi_offset =
3216 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3217 reg_entry->seg_hi, reg_entry->reg_hi);
3218 err_status_hi_data = RREG32(err_status_hi_offset);
3220 if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3221 !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3222 /* keep the check here in case we need to refer to the result later */
3223 dev_dbg(adev->dev, "Invalid err_info field\n");
3225 /* read err count */
3226 *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3231 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3232 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3233 uint32_t reg_list_size,
3234 const struct amdgpu_ras_memory_id_entry *mem_list,
3235 uint32_t mem_list_size,
3238 unsigned long *err_count)
3241 unsigned long err_cnt;
3242 char err_type_name[16];
3245 for (i = 0; i < reg_list_size; i++) {
3246 /* query memory_id from err_status_lo */
3247 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3248 instance, &memory_id))
3251 /* query err_cnt from err_status_hi */
3252 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3253 instance, &err_cnt) ||
3257 *err_count += err_cnt;
3259 /* log the errors */
3260 amdgpu_ras_get_error_type_name(err_type, err_type_name);
3262 /* memory_list is not supported */
3264 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3265 err_cnt, err_type_name,
3266 reg_list[i].block_name,
3267 instance, memory_id);
3269 for (j = 0; j < mem_list_size; j++) {
3270 if (memory_id == mem_list[j].memory_id) {
3272 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3273 err_cnt, err_type_name,
3274 reg_list[i].block_name,
3275 instance, mem_list[j].name);
3283 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3284 const struct amdgpu_ras_err_status_reg_entry *reg_list,
3285 uint32_t reg_list_size,
3288 uint32_t err_status_lo_offset, err_status_hi_offset;
3291 for (i = 0; i < reg_list_size; i++) {
3292 err_status_lo_offset =
3293 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3294 reg_list[i].seg_lo, reg_list[i].reg_lo);
3295 err_status_hi_offset =
3296 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3297 reg_list[i].seg_hi, reg_list[i].reg_hi);
3298 WREG32(err_status_lo_offset, 0);
3299 WREG32(err_status_hi_offset, 0);
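/* AMDGPU_RAS_REG_ENTRY_OFFSET() resolves the (hwip, instance, segment,
 * register) tuple of each table entry into an absolute register
 * offset, so one reg_list can service every instance of a block. An
 * illustrative call, assuming a reg_list array defined elsewhere,
 * resets the error counters of instance 1:
 *
 *     amdgpu_ras_inst_reset_ras_error_count(adev, reg_list,
 *                                           ARRAY_SIZE(reg_list), 1);
 */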