/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])
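/* e.g. AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE is BIT(2), so ffs() returns 3
 * and ras_err_str() picks ras_error_string[3], i.e. "multi_uncorrectable".
 */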

#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr);

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
		size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);

	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_str(i)) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
		return -EINVAL;

	data->head.block = block_id;
	/* only ue and ce errors are supported */
	if (!memcmp("ue", err, 2))
		data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	else if (!memcmp("ce", err, 2))
		data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		if (sscanf(str, "%*s %*s %*s %u %llu %llu",
					&sub_block, &address, &value) != 3)
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
						&sub_block, &address, &value) != 3)
				return -EINVAL;

		data->head.sub_block_index = sub_block;
		data->inject.address = address;
		data->inject.value = value;
	if (size < sizeof(*data))
		return -EINVAL;

	if (copy_from_user(data, buf, sizeof(*data)))
		return -EINVAL;

static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head);

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * It accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Takes ::head as its data.
 * - 1: enable RAS on the block. Takes ::head as its data.
 * - 2: inject errors on the block. Takes ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program: copy the struct ras_debug_if into your code, initialize it,
 * and write the struct to the control node.
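 *
 * For instance, a minimal userspace sketch (assuming the struct layout is
 * copied verbatim from amdgpu_ras.h; path and error handling are
 * illustrative only):
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data;
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	memset(&data, 0, sizeof(data));
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;	/* 0: disable, 1: enable, 2: inject */
 *	write(fd, &data, sizeof(data));
 *	close(fd);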
 *
 * In bash:
 *
 * .. code-block:: bash
 *
 *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 * Where the parameters are,
 *
 * op: disable, enable, inject
 *	disable: only block is needed
 *	enable: block and error are needed
 *	inject: error, address, value are needed
 * block: umc, sdma, gfx, .........
 *	see ras_block_string[] for details
 * error: ue, ce
 *	ue: multi_uncorrectable
 *	ce: single_correctable
 * sub_block_index: sub block index, pass 0 if there is no sub block
 *
 * Here are some examples of bash commands:
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding err count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * Operations are only allowed on blocks which are supported.
 * Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
 * to see which blocks support RAS on a particular asic.
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret;

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return -EINVAL;

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
					data.inject.address);
			break;
		}

		/* data.inject.address is an offset rather than an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return -EINVAL;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which have experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);

	return ret == 1 ? size : -EIO;
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrectable (ue) and correctable
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ue|ce]: count
 *
 * Example:
 *
 * .. code-block:: bash
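 *
 *	ue: 0
 *	ce: 1
 */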
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (amdgpu_ras_error_query(obj->adev, &info))
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
}

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && --obj->use == 0)
		list_del(&obj->node);
	if (obj && obj->use < 0) {
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
	}
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];
	/* already exists. return obj? */
	if (alive_obj(obj))
		return NULL;

	list_add(&obj->node, &con->head);

/* return an obj equal to head, or the first when head is NULL */
static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];

	if (alive_obj(obj)) {
		WARN_ON(head->block != obj->head.block);
		return obj;
	}

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		obj = &con->objs[i];
		if (alive_obj(obj)) {
			WARN_ON(i != obj->head.block);
			return obj;
		}
	}

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->hw_supported & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/* if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * IPs check con->supported to see if they need to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;
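
	/* nothing to do if we are already in the requested state */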
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input info;
	int ret = 0;

	if (!con)
		return -EINVAL;

	if (!enable) {
		info.disable_features = (struct ta_ras_disable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	} else {
		info.enable_features = (struct ta_ras_enable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	}

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (!amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, &info, enable);
		if (ret) {
			DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
					enable ? "enable":"disable",
					ras_block_str(head->block),
					ret);
			if (ret == TA_RAS_STATUS__RESET_NEEDED)
				return -EAGAIN;
			return -EINVAL;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);

	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If the current state equals the target state, it will
			 * do nothing. But sometimes it requests the driver to
			 * reset and repost with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this workaround in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					DRM_INFO("RAS INFO: %s setup object\n",
							ras_block_str(head->block));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			ret = amdgpu_ras_feature_enable(adev, head, 0);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		if (bypass) {
			/* bypass psp.
			 * aka just release the obj and corresponding flags
			 */
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;
	const enum amdgpu_ras_error_type default_ras_type =
		AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};
		strcpy(head.name, ras_block_str(i));
		if (bypass) {
			/* bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

/* query/inject/cure begin */
int amdgpu_ras_error_query(struct amdgpu_device *adev,
		struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};

	if (!obj)
		return -EINVAL;

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__UMC:
		if (adev->umc.funcs->query_ras_error_count)
			adev->umc.funcs->query_ras_error_count(adev, &err_data);
		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.funcs->query_ras_error_address)
			adev->umc.funcs->query_ras_error_address(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->query_ras_error_count)
			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		if (adev->nbio.funcs->query_ras_error_count)
			adev->nbio.funcs->query_ras_error_count(adev, &err_data);
		break;
	default:
		break;
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
				obj->err_data.ce_count, ras_block_str(info->head.block));
	}
	if (err_data.ue_count) {
		dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
				obj->err_data.ue_count, ras_block_str(info->head.block));
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = 0;

	if (!obj)
		return -EINVAL;

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->ras_error_inject)
			ret = adev->gfx.funcs->ras_error_inject(adev, info);
		else
			ret = -EINVAL;
		break;
	case AMDGPU_RAS_BLOCK__UMC:
	case AMDGPU_RAS_BLOCK__MMHUB:
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		ret = psp_ras_trigger_error(&adev->psp, &block_info);
		break;
	default:
		DRM_INFO("%s error injection is not supported yet\n",
				ras_block_str(info->head.block));
		ret = -EINVAL;
	}

	if (ret)
		DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
				ras_block_str(info->head.block),
				ret);

	return ret;
}

int amdgpu_ras_error_cure(struct amdgpu_device *adev,
		struct ras_cure_if *info)
{
	/* psp fw has no cure interface for now. */
	return 0;
}

/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
		bool is_ce)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	if (!con)
		return 0;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		if (amdgpu_ras_error_query(adev, &info))
			return 0;

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;
	}

	return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve, this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
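	/* e.g. with the 28-byte element above, ppos = 30 and count = 56 map to
	 * start = 2 (first whole element at or after ppos) and end = 3 (the
	 * element holding the last requested byte), so only element 2 prints.
	 */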
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}

static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		&con->badpages_attr,
		NULL
	};
	struct attribute_group group = {
		.name = "ras",
		.attrs = attrs,
		.bin_attrs = bin_attrs,
	};

	con->features_attr = (struct device_attribute) {
		.attr = {
			.name = "features",
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_features_read,
	};

	con->badpages_attr = (struct bin_attribute) {
		.attr = {
			.name = "gpu_vram_bad_pages",
			.mode = S_IRUGO,
		},
		.read = amdgpu_ras_sysfs_badpages_read,
	};

	sysfs_attr_init(attrs[0]);
	sysfs_bin_attr_init(bin_attrs[0]);

	return sysfs_create_group(&adev->dev->kobj, &group);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		&con->badpages_attr,
		NULL
	};
	struct attribute_group group = {
		.name = "ras",
		.attrs = attrs,
		.bin_attrs = bin_attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.sysfs_name,
			head->sysfs_name,
			sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				"ras")) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
			&obj->sysfs_attr.attr,
			"ras");
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor *minor = adev->ddev->primary;

	con->dir = debugfs_create_dir("ras", minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
				adev, &amdgpu_ras_debugfs_eeprom_ops);

	/*
	 * After an uncorrectable error occurs, GPU recovery is usually
	 * scheduled. But because of the known problem of GPU recovery
	 * failing to bring the GPU back, the interface below gives the
	 * user a direct way to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
	 * routine is never called in that case.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
				&con->reboot);
}

void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->ent)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
				       S_IWUGO | S_IRUGO, con->dir, obj,
				       &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->ent)
		return;

	debugfs_remove(obj->ent);
	obj->ent = NULL;
	put_obj(obj);
}

static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_debugfs_remove(adev, &obj->head);
	}

	debugfs_remove_recursive(con->dir);
	con->dir = NULL;
}

static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	amdgpu_ras_sysfs_create_feature_node(adev);
	amdgpu_ras_debugfs_create_ctrl_node(adev);

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	amdgpu_ras_debugfs_remove_all(adev);
	amdgpu_ras_sysfs_remove_all(adev);

	return 0;
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;
	int ret;
	struct ras_err_data err_data = {0, 0, 0, NULL};

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);
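
		/* slots in the ring are aligned_element_size wide, so the
		 * read pointer advances by the aligned size, not element_size
		 */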
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		/* Let the IP handle its data; maybe we need to get the output
		 * from the callback to update the error type/count, etc.
		 */
		if (data->cb) {
			ret = data->cb(obj->adev, &err_data, &entry);
			/* ue will trigger an interrupt, and in that case
			 * we need to do a reset to recover the whole system.
			 * But leave the IP to do that recovery; here we just
			 * dispatch the event.
			 */
			if (ret == AMDGPU_RAS_SUCCESS) {
				/* these counts could be left as 0 if
				 * some blocks do not count error number
				 */
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
			}
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, &info->head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = info->cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	data->inuse = 1;

	return 0;
}

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		struct ras_ih_if info = {
			.head = obj->head,
		};
		amdgpu_ras_interrupt_remove_handler(adev, &info);
	}

	return 0;
}

/* recovery begin */

/* return 0 on success.
 * caller needs to free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*bps = NULL;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};

		if (data->last_reserved <= i)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (data->bps_bo[i] == NULL)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}

static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);

	amdgpu_device_gpu_recover(ras->adev, 0);
	atomic_set(&ras->in_recovery, 0);
}

/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
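	/* grow capacity in 512-entry chunks: e.g. old_space = 300 and
	 * pages = 10 give new_space = 310, which rounds up to 512 below
	 */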
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
	struct amdgpu_bo **bps_bo =
		kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);

	if (!bps || !bps_bo) {
		kfree(bps);
		kfree(bps_bo);
		return -ENOMEM;
	}

	if (data->bps) {
		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}
	if (data->bps_bo) {
		memcpy(bps_bo, data->bps_bo,
				data->count * sizeof(*data->bps_bo));
		kfree(data->bps_bo);
	}

	data->bps = bps;
	data->bps_bo = bps_bo;
	data->space_left += align_space - old_space;
	return 0;
}

/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	if (data->space_left <= pages)
		if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
			ret = -ENOMEM;
			goto out;
		}

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
	data->space_left -= pages;

out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}

/*
 * write the error record array to eeprom; the function should be
 * protected by recovery_lock
 */
static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data)
		return 0;

	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->num_recs;
	/* only new entries are saved */
	if (save_count > 0)
		if (amdgpu_ras_eeprom_process_recods(control,
					&data->bps[control->num_recs],
					true,
					save_count)) {
			DRM_ERROR("Failed to save EEPROM table data!");
			return -EIO;
		}

	return 0;
}

/*
 * read the error record array from eeprom and reserve enough space for
 * storing new bad pages
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
		&adev->psp.ras.ras->eeprom_control;
	struct eeprom_table_record *bps = NULL;
	int ret = 0;

	/* no bad page record, skip eeprom access */
	if (!control->num_recs)
		return ret;

	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
		control->num_recs)) {
		DRM_ERROR("Failed to load EEPROM table records!");
		ret = -EIO;
		goto out;
	}

	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);

out:
	kfree(bps);
	return ret;
}

/*
 * check if an address belongs to a bad page
 *
 * Note: this check is only for the umc block
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i;
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
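	/* addr and retired_page are now both page frame numbers, so the
	 * comparison below is in units of GPU pages, not byte addresses
	 */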
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page) {
			ret = true;
			goto out;
		}
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}

/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	uint64_t bp;
	struct amdgpu_bo *bo = NULL;
	int i, ret = 0;

	if (!con || !con->eh_data)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	/* reserve vram at driver post stage. */
	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}

	/* continue to save bad pages to eeprom even if reserve_vram fails */
	ret = amdgpu_ras_save_bad_pages(adev);
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}

/* called on driver unload */
static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_bo *bo;
	int i;

	if (!con || !con->eh_data)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];

		amdgpu_bo_free_kernel(&bo, NULL, NULL);

		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
out:
	mutex_unlock(&con->recovery_lock);
	return 0;
}

int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	int ret;

	if (con)
		data = &con->eh_data;
	else
		return 0;

	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->adev = adev;

	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
	if (ret)
		goto free;

	if (con->eeprom_control.num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;
		ret = amdgpu_ras_reserve_bad_pages(adev);
		if (ret)
			goto release;
	}

	return 0;

release:
	amdgpu_ras_release_bad_pages(adev);
free:
	kfree((*data)->bps);
	kfree((*data)->bps_bo);
	kfree(*data);
	con->eh_data = NULL;
out:
	DRM_WARN("Failed to initialize ras recovery!\n");

	return ret;
}

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to init it, fini is useless */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);
	amdgpu_ras_release_bad_pages(adev);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* recovery end */

/* return 0 if ras will reset gpu and repost. */
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
		unsigned int block)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!ras)
		return -EINVAL;

	ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
	return 0;
}

/*
 * check hardware's ras ability which will be saved in hw_supported.
 * if hardware does not support ras, we can skip some ras initialization and
 * forbid some ras operations from IP blocks.
 * if the software itself, say a boot parameter, limits the ras ability, we
 * still need to allow IP blocks to do some limited operations, like disable.
 * In such a case, we have to initialize ras as normal, but need to check in
 * each function whether the operation is allowed or not.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
		uint32_t *hw_supported, uint32_t *supported)
{
	*hw_supported = 0;
	*supported = 0;

	if (amdgpu_sriov_vf(adev) ||
	    adev->asic_type != CHIP_VEGA20)
		return;

	if (adev->is_atom_fw &&
	    (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
	     amdgpu_atomfirmware_sram_ecc_supported(adev)))
		*hw_supported = AMDGPU_RAS_BLOCK_MASK;

	*supported = amdgpu_ras_enable == 0 ?
			0 : *hw_supported & amdgpu_ras_mask;
}

int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kmalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
			GFP_KERNEL|__GFP_ZERO);
	if (!con)
		return -ENOMEM;

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev, &con->hw_supported,
			&con->supported);
	if (!con->hw_supported) {
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
		return 0;
	}

	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	if (adev->nbio.funcs->init_ras_controller_interrupt) {
		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
		if (r)
			return r;
	}

	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
		if (r)
			return r;
	}

	amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;

	if (amdgpu_ras_fs_init(adev))
		goto fs_out;

	DRM_INFO("RAS INFO: ras initialized successfully, "
			"hardware ability[%x] ras_mask[%x]\n",
			con->hw_supported, con->supported);
	return 0;
fs_out:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return -EINVAL;
}

/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
			 struct ras_common_if *ras_block,
			 struct ras_fs_if *fs_info,
			 struct ras_ih_if *ih_info)
{
	int r;

	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (r == -EAGAIN) {
			/* request gpu reset. will run again */
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			return 0;
		} else if (adev->in_suspend || adev->in_gpu_reset) {
			/* in resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes, and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* in resume phase, no need to create ras fs node */
	if (adev->in_suspend || adev->in_gpu_reset)
		return 0;

	if (ih_info->cb) {
		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
		if (r)
			goto interrupt;
	}

	amdgpu_ras_debugfs_create(adev, fs_info);

	r = amdgpu_ras_sysfs_create(adev, fs_info);
	if (r)
		goto sysfs;

	return 0;
cleanup:
	amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
	amdgpu_ras_debugfs_remove(adev, ras_block);
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}

/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
			  struct ras_common_if *ras_block,
			  struct ras_ih_if *ih_info)
{
	if (!ras_block || !ih_info)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);
	amdgpu_ras_debugfs_remove(adev, ras_block);
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
	amdgpu_ras_feature_enable(adev, ras_block, 0);
}

/* do some init work after IP late init as dependence,
 * and it runs in the resume/gpu-reset/boot-up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!con)
		return;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing that the IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * might not be implemented yet. So we disable those on behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference. */
				WARN_ON(alive_obj(obj));
			}
		}
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
		con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
		/* set up the ras obj state as disabled.
		 * for the init_by_vbios case.
		 * if we want to enable ras, just enable it in a normal way.
		 * If we want to disable it, we need to set up the ras obj as
		 * enabled, then issue another TA disable cmd.
		 * See feature_enable_on_boot
		 */
		amdgpu_ras_disable_all_features(adev, 1);
		amdgpu_ras_reset_gpu(adev, 0);
	}
}

void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}

/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return 0;

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	uint32_t hw_supported, supported;

	amdgpu_ras_check_supported(adev, &hw_supported, &supported);
	if (!hw_supported)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");

		amdgpu_ras_reset_gpu(adev, false);
	}
}