drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31
32 #include "amdgpu.h"
33 #include "amdgpu_ras.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_xgmi.h"
36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
37 #include "nbio_v4_3.h"
38 #include "nbio_v7_9.h"
39 #include "atom.h"
40 #include "amdgpu_reset.h"
41
42 #ifdef CONFIG_X86_MCE_AMD
43 #include <asm/mce.h>
44
45 static bool notifier_registered;
46 #endif
47 static const char *RAS_FS_NAME = "ras";
48
49 const char *ras_error_string[] = {
50         "none",
51         "parity",
52         "single_correctable",
53         "multi_uncorrectable",
54         "poison",
55 };
56
57 const char *ras_block_string[] = {
58         "umc",
59         "sdma",
60         "gfx",
61         "mmhub",
62         "athub",
63         "pcie_bif",
64         "hdp",
65         "xgmi_wafl",
66         "df",
67         "smn",
68         "sem",
69         "mp0",
70         "mp1",
71         "fuse",
72         "mca",
73         "vcn",
74         "jpeg",
75 };
76
77 const char *ras_mca_block_string[] = {
78         "mca_mp0",
79         "mca_mp1",
80         "mca_mpio",
81         "mca_iohc",
82 };
83
84 struct amdgpu_ras_block_list {
85         /* ras block link */
86         struct list_head node;
87
88         struct amdgpu_ras_block_object *ras_obj;
89 };
90
91 const char *get_ras_block_str(struct ras_common_if *ras_block)
92 {
93         if (!ras_block)
94                 return "NULL";
95
96         if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
97                 return "OUT OF RANGE";
98
99         if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
100                 return ras_mca_block_string[ras_block->sub_block_index];
101
102         return ras_block_string[ras_block->block];
103 }
104
105 #define ras_block_str(_BLOCK_) \
106         (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
107
108 #define ras_err_str(i) (ras_error_string[ffs(i)])
109
110 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
111
112 /* inject address is 52 bits */
113 #define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)
114
115 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
116 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
117
118 enum amdgpu_ras_retire_page_reservation {
119         AMDGPU_RAS_RETIRE_PAGE_RESERVED,
120         AMDGPU_RAS_RETIRE_PAGE_PENDING,
121         AMDGPU_RAS_RETIRE_PAGE_FAULT,
122 };
123
124 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
125
126 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
127                                 uint64_t addr);
128 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
129                                 uint64_t addr);
130 #ifdef CONFIG_X86_MCE_AMD
131 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
132 struct mce_notifier_adev_list {
133         struct amdgpu_device *devs[MAX_GPU_INSTANCE];
134         int num_gpu;
135 };
136 static struct mce_notifier_adev_list mce_adev_list;
137 #endif
138
139 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
140 {
141         if (adev && amdgpu_ras_get_context(adev))
142                 amdgpu_ras_get_context(adev)->error_query_ready = ready;
143 }
144
145 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
146 {
147         if (adev && amdgpu_ras_get_context(adev))
148                 return amdgpu_ras_get_context(adev)->error_query_ready;
149
150         return false;
151 }
152
153 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
154 {
155         struct ras_err_data err_data = {0, 0, 0, NULL};
156         struct eeprom_table_record err_rec;
157
158         if ((address >= adev->gmc.mc_vram_size) ||
159             (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
160                 dev_warn(adev->dev,
161                          "RAS WARN: input address 0x%llx is invalid.\n",
162                          address);
163                 return -EINVAL;
164         }
165
166         if (amdgpu_ras_check_bad_page(adev, address)) {
167                 dev_warn(adev->dev,
168                          "RAS WARN: 0x%llx has already been marked as bad page!\n",
169                          address);
170                 return 0;
171         }
172
173         memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
174         err_data.err_addr = &err_rec;
175         amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
176
177         if (amdgpu_bad_page_threshold != 0) {
178                 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
179                                          err_data.err_addr_cnt);
180                 amdgpu_ras_save_bad_pages(adev, NULL);
181         }
182
183         dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
184         dev_warn(adev->dev, "Clear EEPROM:\n");
185         dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
186
187         return 0;
188 }
189
190 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
191                                         size_t size, loff_t *pos)
192 {
193         struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
194         struct ras_query_if info = {
195                 .head = obj->head,
196         };
197         ssize_t s;
198         char val[128];
199
200         if (amdgpu_ras_query_error_status(obj->adev, &info))
201                 return -EINVAL;
202
203         /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
204         if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
205             obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
206                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
207                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
208         }
209
210         s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
211                         "ue", info.ue_count,
212                         "ce", info.ce_count);
213         if (*pos >= s)
214                 return 0;
215
216         s -= *pos;
217         s = min_t(u64, s, size);
218
219
220         if (copy_to_user(buf, &val[*pos], s))
221                 return -EINVAL;
222
223         *pos += s;
224
225         return s;
226 }
227
228 static const struct file_operations amdgpu_ras_debugfs_ops = {
229         .owner = THIS_MODULE,
230         .read = amdgpu_ras_debugfs_read,
231         .write = NULL,
232         .llseek = default_llseek
233 };
234
235 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
236 {
237         int i;
238
239         for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
240                 *block_id = i;
241                 if (strcmp(name, ras_block_string[i]) == 0)
242                         return 0;
243         }
244         return -EINVAL;
245 }
246
247 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
248                 const char __user *buf, size_t size,
249                 loff_t *pos, struct ras_debug_if *data)
250 {
251         ssize_t s = min_t(u64, 64, size);
252         char str[65];
253         char block_name[33];
254         char err[9] = "ue";
255         int op = -1;
256         int block_id;
257         uint32_t sub_block;
258         u64 address, value;
259         /* default value is 0 if the mask is not set by user */
260         u32 instance_mask = 0;
261
262         if (*pos)
263                 return -EINVAL;
264         *pos = size;
265
266         memset(str, 0, sizeof(str));
267         memset(data, 0, sizeof(*data));
268
269         if (copy_from_user(str, buf, s))
270                 return -EINVAL;
271
272         if (sscanf(str, "disable %32s", block_name) == 1)
273                 op = 0;
274         else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
275                 op = 1;
276         else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
277                 op = 2;
278         else if (strstr(str, "retire_page") != NULL)
279                 op = 3;
280         else if (str[0] && str[1] && str[2] && str[3])
281                 /* ascii string, but no command matched. */
282                 return -EINVAL;
283
284         if (op != -1) {
285                 if (op == 3) {
286                         if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
287                             sscanf(str, "%*s %llu", &address) != 1)
288                                 return -EINVAL;
289
290                         data->op = op;
291                         data->inject.address = address;
292
293                         return 0;
294                 }
295
296                 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
297                         return -EINVAL;
298
299                 data->head.block = block_id;
300                 /* only ue and ce errors are supported */
301                 if (!memcmp("ue", err, 2))
302                         data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
303                 else if (!memcmp("ce", err, 2))
304                         data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
305                 else
306                         return -EINVAL;
307
308                 data->op = op;
309
310                 if (op == 2) {
311                         if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
312                                    &sub_block, &address, &value, &instance_mask) != 4 &&
313                             sscanf(str, "%*s %*s %*s %u %llu %llu %u",
314                                    &sub_block, &address, &value, &instance_mask) != 4 &&
315                                 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
316                                    &sub_block, &address, &value) != 3 &&
317                             sscanf(str, "%*s %*s %*s %u %llu %llu",
318                                    &sub_block, &address, &value) != 3)
319                                 return -EINVAL;
320                         data->head.sub_block_index = sub_block;
321                         data->inject.address = address;
322                         data->inject.value = value;
323                         data->inject.instance_mask = instance_mask;
324                 }
325         } else {
326                 if (size < sizeof(*data))
327                         return -EINVAL;
328
329                 if (copy_from_user(data, buf, sizeof(*data)))
330                         return -EINVAL;
331         }
332
333         return 0;
334 }
335
336 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
337                                 struct ras_debug_if *data)
338 {
339         int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
340         uint32_t mask, inst_mask = data->inject.instance_mask;
341
342         /* no need to set instance mask if there is only one instance */
343         if (num_xcc <= 1 && inst_mask) {
344                 data->inject.instance_mask = 0;
345                 dev_dbg(adev->dev,
346                         "RAS inject mask (0x%x) isn't supported; forcing it to 0.\n",
347                         inst_mask);
348
349                 return;
350         }
351
352         switch (data->head.block) {
353         case AMDGPU_RAS_BLOCK__GFX:
354                 mask = GENMASK(num_xcc - 1, 0);
355                 break;
356         case AMDGPU_RAS_BLOCK__SDMA:
357                 mask = GENMASK(adev->sdma.num_instances - 1, 0);
358                 break;
359         case AMDGPU_RAS_BLOCK__VCN:
360         case AMDGPU_RAS_BLOCK__JPEG:
361                 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
362                 break;
363         default:
364                 mask = inst_mask;
365                 break;
366         }
367
368         /* remove invalid bits in instance mask */
369         data->inject.instance_mask &= mask;
370         if (inst_mask != data->inject.instance_mask)
371                 dev_dbg(adev->dev,
372                         "Adjust RAS inject mask 0x%x to 0x%x\n",
373                         inst_mask, data->inject.instance_mask);
374 }
375
376 /**
377  * DOC: AMDGPU RAS debugfs control interface
378  *
379  * The control interface accepts struct ras_debug_if which has two members.
380  *
381  * First member: ras_debug_if::head or ras_debug_if::inject.
382  *
383  * head is used to indicate which IP block will be under control.
384  *
385  * head has four members: block, type, sub_block_index, and name.
386  * block: which IP will be under control.
387  * type: what kind of error will be enabled/disabled/injected.
388  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
389  * name: the name of the IP.
390  *
391  * inject has three more members than head: address, value, and mask.
392  * As their names indicate, the inject operation will write the
393  * value to the address.
394  *
395  * The second member: struct ras_debug_if::op.
396  * It has three kinds of operations.
397  *
398  * - 0: disable RAS on the block. Take ::head as its data.
399  * - 1: enable RAS on the block. Take ::head as its data.
400  * - 2: inject errors on the block. Take ::inject as its data.
401  *
402  * How to use the interface?
403  *
404  * In a program
405  *
406  * Copy the struct ras_debug_if in your code and initialize it.
407  * Write the struct to the control interface.
408  *
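 * As a rough userspace sketch (not part of the driver), a program could do
 * something like the following. The struct mirror below is only an assumed
 * copy of the layout in amdgpu_ras.h (enums mirrored as plain int), and the
 * card index, block and type values are placeholders to be filled in from
 * the real header:
 *
 * .. code-block:: c
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <unistd.h>
 *
 *      // Assumed mirror of the definitions in amdgpu_ras.h; copy the real
 *      // ones so the layout and sizeof() match the running kernel.
 *      struct ras_common_if {
 *              int block;
 *              int type;
 *              uint32_t sub_block_index;
 *              char name[32];
 *      };
 *
 *      struct ras_inject_if {
 *              struct ras_common_if head;
 *              uint64_t address;
 *              uint64_t value;
 *              uint32_t instance_mask;
 *      };
 *
 *      struct ras_debug_if {
 *              union {
 *                      struct ras_common_if head;
 *                      struct ras_inject_if inject;
 *              };
 *              int op;
 *      };
 *
 *      int main(void)
 *      {
 *              struct ras_debug_if data = { 0 };
 *              int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *              if (fd < 0)
 *                      return 1;
 *
 *              data.op = 1;         // 1 == enable RAS on the block
 *              data.head.block = 0; // enum amdgpu_ras_block value, 0 == umc here
 *              data.head.type = 0;  // enum amdgpu_ras_error_type value
 *
 *              // The whole struct must be written in a single call.
 *              if (write(fd, &data, sizeof(data)) != sizeof(data)) {
 *                      close(fd);
 *                      return 1;
 *              }
 *
 *              close(fd);
 *              return 0;
 *      }
 *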
409  * From shell
410  *
411  * .. code-block:: bash
412  *
413  *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
414  *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
415  *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
416  *
417  * Where N is the index of the card you want to affect.
418  *
419  * "disable" requires only the block.
420  * "enable" requires the block and error type.
421  * "inject" requires the block, error type, address, and value.
422  *
423  * The block is one of: umc, sdma, gfx, etc.
424  *      see ras_block_string[] for details
425  *
426  * The error type is one of: ue, ce, where,
427  *      ue is multi-uncorrectable
428  *      ce is single-correctable
429  *
430  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
431  * The address and value are hexadecimal numbers; the leading 0x is optional.
432  * The mask is the instance mask; it is optional and its default value is 0x1.
433  *
434  * For instance,
435  *
436  * .. code-block:: bash
437  *
438  *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
439  *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
440  *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
441  *
442  * How to check the result of the operation?
443  *
444  * To check disable/enable, see "ras" features at,
445  * /sys/class/drm/card[0/1/2...]/device/ras/features
446  *
447  * To check inject, see the corresponding error count at,
448  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
449  *
450  * .. note::
451  *      Operations are only allowed on blocks which are supported.
452  *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
453  *      to see which blocks support RAS on a particular asic.
454  *
455  */
456 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
457                                              const char __user *buf,
458                                              size_t size, loff_t *pos)
459 {
460         struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
461         struct ras_debug_if data;
462         int ret = 0;
463
464         if (!amdgpu_ras_get_error_query_ready(adev)) {
465                 dev_warn(adev->dev, "RAS WARN: error injection "
466                                 "currently inaccessible\n");
467                 return size;
468         }
469
470         ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
471         if (ret)
472                 return ret;
473
474         if (data.op == 3) {
475                 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
476                 if (!ret)
477                         return size;
478                 else
479                         return ret;
480         }
481
482         if (!amdgpu_ras_is_supported(adev, data.head.block))
483                 return -EINVAL;
484
485         switch (data.op) {
486         case 0:
487                 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
488                 break;
489         case 1:
490                 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
491                 break;
492         case 2:
493                 if ((data.inject.address >= adev->gmc.mc_vram_size &&
494                     adev->gmc.mc_vram_size) ||
495                     (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
496                         dev_warn(adev->dev, "RAS WARN: input address "
497                                         "0x%llx is invalid.",
498                                         data.inject.address);
499                         ret = -EINVAL;
500                         break;
501                 }
502
503                 /* umc ce/ue error injection for a bad page is not allowed */
504                 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
505                     amdgpu_ras_check_bad_page(adev, data.inject.address)) {
506                         dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
507                                  "already been marked as bad!\n",
508                                  data.inject.address);
509                         break;
510                 }
511
512                 amdgpu_ras_instance_mask_check(adev, &data);
513
514                 /* data.inject.address is an offset instead of an absolute gpu address */
515                 ret = amdgpu_ras_error_inject(adev, &data.inject);
516                 break;
517         default:
518                 ret = -EINVAL;
519                 break;
520         }
521
522         if (ret)
523                 return ret;
524
525         return size;
526 }
527
528 /**
529  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
530  *
531  * Some boards contain an EEPROM which is used to persistently store a list of
532  * bad pages which have experienced ECC errors in vram.  This interface provides
533  * a way to reset the EEPROM, e.g., after testing error injection.
534  *
535  * Usage:
536  *
537  * .. code-block:: bash
538  *
539  *      echo 1 > ../ras/ras_eeprom_reset
540  *
541  * will reset the EEPROM table to 0 entries.
542  *
543  */
544 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
545                                                const char __user *buf,
546                                                size_t size, loff_t *pos)
547 {
548         struct amdgpu_device *adev =
549                 (struct amdgpu_device *)file_inode(f)->i_private;
550         int ret;
551
552         ret = amdgpu_ras_eeprom_reset_table(
553                 &(amdgpu_ras_get_context(adev)->eeprom_control));
554
555         if (!ret) {
556                 /* Something was written to EEPROM.
557                  */
558                 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
559                 return size;
560         } else {
561                 return ret;
562         }
563 }
564
565 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
566         .owner = THIS_MODULE,
567         .read = NULL,
568         .write = amdgpu_ras_debugfs_ctrl_write,
569         .llseek = default_llseek
570 };
571
572 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
573         .owner = THIS_MODULE,
574         .read = NULL,
575         .write = amdgpu_ras_debugfs_eeprom_write,
576         .llseek = default_llseek
577 };
578
579 /**
580  * DOC: AMDGPU RAS sysfs Error Count Interface
581  *
582  * It allows the user to read the error count for each IP block on the gpu through
583  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
584  *
585  * It outputs multiple lines which report the uncorrected (ue) and corrected
586  * (ce) error counts.
587  *
588  * The format of one line is below,
589  *
590  * [ce|ue]: count
591  *
592  * Example:
593  *
594  * .. code-block:: bash
595  *
596  *      ue: 0
597  *      ce: 1
598  *
599  */
600 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
601                 struct device_attribute *attr, char *buf)
602 {
603         struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
604         struct ras_query_if info = {
605                 .head = obj->head,
606         };
607
608         if (!amdgpu_ras_get_error_query_ready(obj->adev))
609                 return sysfs_emit(buf, "Query currently inaccessible\n");
610
611         if (amdgpu_ras_query_error_status(obj->adev, &info))
612                 return -EINVAL;
613
614         if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
615             obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
616                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
617                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
618         }
619
620         return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
621                           "ce", info.ce_count);
622 }
623
624 /* obj begin */
625
626 #define get_obj(obj) do { (obj)->use++; } while (0)
627 #define alive_obj(obj) ((obj)->use)
628
629 static inline void put_obj(struct ras_manager *obj)
630 {
631         if (obj && (--obj->use == 0))
632                 list_del(&obj->node);
633         if (obj && (obj->use < 0))
634                 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
635 }
636
637 /* make one obj and return it. */
638 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
639                 struct ras_common_if *head)
640 {
641         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
642         struct ras_manager *obj;
643
644         if (!adev->ras_enabled || !con)
645                 return NULL;
646
647         if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
648                 return NULL;
649
650         if (head->block == AMDGPU_RAS_BLOCK__MCA) {
651                 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
652                         return NULL;
653
654                 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
655         } else
656                 obj = &con->objs[head->block];
657
658         /* already exists. return obj? */
659         if (alive_obj(obj))
660                 return NULL;
661
662         obj->head = *head;
663         obj->adev = adev;
664         list_add(&obj->node, &con->head);
665         get_obj(obj);
666
667         return obj;
668 }
669
670 /* return an obj equal to head, or the first when head is NULL */
671 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
672                 struct ras_common_if *head)
673 {
674         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
675         struct ras_manager *obj;
676         int i;
677
678         if (!adev->ras_enabled || !con)
679                 return NULL;
680
681         if (head) {
682                 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
683                         return NULL;
684
685                 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
686                         if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
687                                 return NULL;
688
689                         obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
690                 } else
691                         obj = &con->objs[head->block];
692
693                 if (alive_obj(obj))
694                         return obj;
695         } else {
696                 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
697                         obj = &con->objs[i];
698                         if (alive_obj(obj))
699                                 return obj;
700                 }
701         }
702
703         return NULL;
704 }
705 /* obj end */
706
707 /* feature ctl begin */
708 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
709                                          struct ras_common_if *head)
710 {
711         return adev->ras_hw_enabled & BIT(head->block);
712 }
713
714 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
715                 struct ras_common_if *head)
716 {
717         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
718
719         return con->features & BIT(head->block);
720 }
721
722 /*
723  * if obj is not created, then create one.
724  * set feature enable flag.
725  */
726 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
727                 struct ras_common_if *head, int enable)
728 {
729         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
730         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
731
732         /* If hardware does not support ras, then do not create obj.
733          * But if hardware supports ras, we can create the obj.
734          * The ras framework checks con->hw_supported to see if it needs to do the
735          * corresponding initialization.
736          * The IP checks con->support to see if it needs to disable ras.
737          */
738         if (!amdgpu_ras_is_feature_allowed(adev, head))
739                 return 0;
740
741         if (enable) {
742                 if (!obj) {
743                         obj = amdgpu_ras_create_obj(adev, head);
744                         if (!obj)
745                                 return -EINVAL;
746                 } else {
747                         /* In case the obj was created somewhere else */
748                         get_obj(obj);
749                 }
750                 con->features |= BIT(head->block);
751         } else {
752                 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
753                         con->features &= ~BIT(head->block);
754                         put_obj(obj);
755                 }
756         }
757
758         return 0;
759 }
760
761 /* wrapper of psp_ras_enable_features */
762 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
763                 struct ras_common_if *head, bool enable)
764 {
765         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
766         union ta_ras_cmd_input *info;
767         int ret;
768
769         if (!con)
770                 return -EINVAL;
771
772         /* Do not enable ras feature if it is not allowed */
773         if (enable &&
774             head->block != AMDGPU_RAS_BLOCK__GFX &&
775             !amdgpu_ras_is_feature_allowed(adev, head))
776                 return 0;
777
778         /* Only enable gfx ras feature from host side */
779         if (head->block == AMDGPU_RAS_BLOCK__GFX &&
780             !amdgpu_sriov_vf(adev) &&
781             !amdgpu_ras_intr_triggered()) {
782                 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
783                 if (!info)
784                         return -ENOMEM;
785
786                 if (!enable) {
787                         info->disable_features = (struct ta_ras_disable_features_input) {
788                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
789                                 .error_type = amdgpu_ras_error_to_ta(head->type),
790                         };
791                 } else {
792                         info->enable_features = (struct ta_ras_enable_features_input) {
793                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
794                                 .error_type = amdgpu_ras_error_to_ta(head->type),
795                         };
796                 }
797
798                 ret = psp_ras_enable_features(&adev->psp, info, enable);
799                 if (ret) {
800                         dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
801                                 enable ? "enable":"disable",
802                                 get_ras_block_str(head),
803                                 amdgpu_ras_is_poison_mode_supported(adev), ret);
804                         return ret;
805                         kfree(info);
                            return ret;
806
807                 kfree(info);
808         }
809
810         /* setup the obj */
811         __amdgpu_ras_feature_enable(adev, head, enable);
812
813         return 0;
814 }
815
816 /* Only used in device probe stage and called only once. */
817 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
818                 struct ras_common_if *head, bool enable)
819 {
820         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
821         int ret;
822
823         if (!con)
824                 return -EINVAL;
825
826         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
827                 if (enable) {
828                         /* There is no harm in issuing a ras TA cmd regardless of
829                          * the current ras state.
830                          * If the current state == target state, it will do nothing.
831                          * But sometimes it requests the driver to reset and repost
832                          * with error code -EAGAIN.
833                          */
834                         ret = amdgpu_ras_feature_enable(adev, head, 1);
835                         /* With old ras TA, we might fail to enable ras.
836                          * Log it and just set up the object.
837                          * TODO: this WA needs to be removed in the future.
838                          */
839                         if (ret == -EINVAL) {
840                                 ret = __amdgpu_ras_feature_enable(adev, head, 1);
841                                 if (!ret)
842                                         dev_info(adev->dev,
843                                                 "RAS INFO: %s setup object\n",
844                                                 get_ras_block_str(head));
845                         }
846                 } else {
847                         /* set up the object, then issue a ras TA disable cmd. */
848                         ret = __amdgpu_ras_feature_enable(adev, head, 1);
849                         if (ret)
850                                 return ret;
851
852                         /* gfx block ras disable cmd must be sent to the ras-ta */
853                         if (head->block == AMDGPU_RAS_BLOCK__GFX)
854                                 con->features |= BIT(head->block);
855
856                         ret = amdgpu_ras_feature_enable(adev, head, 0);
857
858                         /* clean gfx block ras features flag */
859                         if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
860                                 con->features &= ~BIT(head->block);
861                 }
862         } else
863                 ret = amdgpu_ras_feature_enable(adev, head, enable);
864
865         return ret;
866 }
867
868 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
869                 bool bypass)
870 {
871         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
872         struct ras_manager *obj, *tmp;
873
874         list_for_each_entry_safe(obj, tmp, &con->head, node) {
875                 /* bypass psp.
876                  * aka just release the obj and corresponding flags
877                  */
878                 if (bypass) {
879                         if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
880                                 break;
881                 } else {
882                         if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
883                                 break;
884                 }
885         }
886
887         return con->features;
888 }
889
890 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
891                 bool bypass)
892 {
893         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
894         int i;
895         const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
896
897         for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
898                 struct ras_common_if head = {
899                         .block = i,
900                         .type = default_ras_type,
901                         .sub_block_index = 0,
902                 };
903
904                 if (i == AMDGPU_RAS_BLOCK__MCA)
905                         continue;
906
907                 if (bypass) {
908                         /*
909                          * bypass psp. vbios enables ras for us,
910                          * so just create the obj.
911                          */
912                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
913                                 break;
914                 } else {
915                         if (amdgpu_ras_feature_enable(adev, &head, 1))
916                                 break;
917                 }
918         }
919
920         for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
921                 struct ras_common_if head = {
922                         .block = AMDGPU_RAS_BLOCK__MCA,
923                         .type = default_ras_type,
924                         .sub_block_index = i,
925                 };
926
927                 if (bypass) {
928                         /*
929                          * bypass psp. vbios enables ras for us,
930                          * so just create the obj.
931                          */
932                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
933                                 break;
934                 } else {
935                         if (amdgpu_ras_feature_enable(adev, &head, 1))
936                                 break;
937                 }
938         }
939
940         return con->features;
941 }
942 /* feature ctl end */
943
944 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
945                 enum amdgpu_ras_block block)
946 {
947         if (!block_obj)
948                 return -EINVAL;
949
950         if (block_obj->ras_comm.block == block)
951                 return 0;
952
953         return -EINVAL;
954 }
955
956 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
957                                         enum amdgpu_ras_block block, uint32_t sub_block_index)
958 {
959         struct amdgpu_ras_block_list *node, *tmp;
960         struct amdgpu_ras_block_object *obj;
961
962         if (block >= AMDGPU_RAS_BLOCK__LAST)
963                 return NULL;
964
965         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
966                 if (!node->ras_obj) {
967                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
968                         continue;
969                 }
970
971                 obj = node->ras_obj;
972                 if (obj->ras_block_match) {
973                         if (obj->ras_block_match(obj, block, sub_block_index) == 0)
974                                 return obj;
975                 } else {
976                         if (amdgpu_ras_block_match_default(obj, block) == 0)
977                                 return obj;
978                 }
979         }
980
981         return NULL;
982 }
983
984 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
985 {
986         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
987         int ret = 0;
988
989         /*
990          * choose the right query method according to
991          * whether the smu supports querying error information
992          */
993         ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
994         if (ret == -EOPNOTSUPP) {
995                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
996                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
997                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
998
999                 /* umc query_ras_error_address is also responsible for clearing
1000                  * error status
1001                  */
1002                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1003                     adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1004                         adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1005         } else if (!ret) {
1006                 if (adev->umc.ras &&
1007                         adev->umc.ras->ecc_info_query_ras_error_count)
1008                         adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1009
1010                 if (adev->umc.ras &&
1011                         adev->umc.ras->ecc_info_query_ras_error_address)
1012                         adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1013         }
1014 }
1015
1016 /* query/inject/cure begin */
1017 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
1018                                   struct ras_query_if *info)
1019 {
1020         struct amdgpu_ras_block_object *block_obj = NULL;
1021         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1022         struct ras_err_data err_data = {0, 0, 0, NULL};
1023
1024         if (!obj)
1025                 return -EINVAL;
1026
1027         if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1028                 amdgpu_ras_get_ecc_info(adev, &err_data);
1029         } else {
1030                 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1031                 if (!block_obj || !block_obj->hw_ops)   {
1032                         dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1033                                      get_ras_block_str(&info->head));
1034                         return -EINVAL;
1035                 }
1036
1037                 if (block_obj->hw_ops->query_ras_error_count)
1038                         block_obj->hw_ops->query_ras_error_count(adev, &err_data);
1039
1040                 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1041                     (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1042                     (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1043                         if (block_obj->hw_ops->query_ras_error_status)
1044                                 block_obj->hw_ops->query_ras_error_status(adev);
1045                 }
1046         }
1047
1048         obj->err_data.ue_count += err_data.ue_count;
1049         obj->err_data.ce_count += err_data.ce_count;
1050
1051         info->ue_count = obj->err_data.ue_count;
1052         info->ce_count = obj->err_data.ce_count;
1053
1054         if (err_data.ce_count) {
1055                 if (adev->smuio.funcs &&
1056                     adev->smuio.funcs->get_socket_id &&
1057                     adev->smuio.funcs->get_die_id) {
1058                         dev_info(adev->dev, "socket: %d, die: %d "
1059                                         "%ld correctable hardware errors "
1060                                         "detected in %s block, no user "
1061                                         "action is needed.\n",
1062                                         adev->smuio.funcs->get_socket_id(adev),
1063                                         adev->smuio.funcs->get_die_id(adev),
1064                                         obj->err_data.ce_count,
1065                                         get_ras_block_str(&info->head));
1066                 } else {
1067                         dev_info(adev->dev, "%ld correctable hardware errors "
1068                                         "detected in %s block, no user "
1069                                         "action is needed.\n",
1070                                         obj->err_data.ce_count,
1071                                         get_ras_block_str(&info->head));
1072                 }
1073         }
1074         if (err_data.ue_count) {
1075                 if (adev->smuio.funcs &&
1076                     adev->smuio.funcs->get_socket_id &&
1077                     adev->smuio.funcs->get_die_id) {
1078                         dev_info(adev->dev, "socket: %d, die: %d "
1079                                         "%ld uncorrectable hardware errors "
1080                                         "detected in %s block\n",
1081                                         adev->smuio.funcs->get_socket_id(adev),
1082                                         adev->smuio.funcs->get_die_id(adev),
1083                                         obj->err_data.ue_count,
1084                                         get_ras_block_str(&info->head));
1085                 } else {
1086                         dev_info(adev->dev, "%ld uncorrectable hardware errors "
1087                                         "detected in %s block\n",
1088                                         obj->err_data.ue_count,
1089                                         get_ras_block_str(&info->head));
1090                 }
1091         }
1092
1093         return 0;
1094 }
1095
1096 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1097                 enum amdgpu_ras_block block)
1098 {
1099         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1100
1101         if (!amdgpu_ras_is_supported(adev, block))
1102                 return -EINVAL;
1103
1104         if (!block_obj || !block_obj->hw_ops)   {
1105                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1106                              ras_block_str(block));
1107                 return -EINVAL;
1108         }
1109
1110         if (block_obj->hw_ops->reset_ras_error_count)
1111                 block_obj->hw_ops->reset_ras_error_count(adev);
1112
1113         if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1114             (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1115                 if (block_obj->hw_ops->reset_ras_error_status)
1116                         block_obj->hw_ops->reset_ras_error_status(adev);
1117         }
1118
1119         return 0;
1120 }
1121
1122 /* wrapper of psp_ras_trigger_error */
1123 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1124                 struct ras_inject_if *info)
1125 {
1126         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1127         struct ta_ras_trigger_error_input block_info = {
1128                 .block_id =  amdgpu_ras_block_to_ta(info->head.block),
1129                 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1130                 .sub_block_index = info->head.sub_block_index,
1131                 .address = info->address,
1132                 .value = info->value,
1133         };
1134         int ret = -EINVAL;
1135         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1136                                                         info->head.block,
1137                                                         info->head.sub_block_index);
1138
1139         /* inject on guest isn't allowed, return success directly */
1140         if (amdgpu_sriov_vf(adev))
1141                 return 0;
1142
1143         if (!obj)
1144                 return -EINVAL;
1145
1146         if (!block_obj || !block_obj->hw_ops)   {
1147                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1148                              get_ras_block_str(&info->head));
1149                 return -EINVAL;
1150         }
1151
1152         /* Calculate XGMI relative offset */
1153         if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1154             info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1155                 block_info.address =
1156                         amdgpu_xgmi_get_relative_phy_addr(adev,
1157                                                           block_info.address);
1158         }
1159
1160         if (block_obj->hw_ops->ras_error_inject) {
1161                 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1162                         ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1163                 else /* Special ras_error_inject is defined (e.g: xgmi) */
1164                         ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1165                                                 info->instance_mask);
1166         } else {
1167                 /* default path */
1168                 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1169         }
1170
1171         if (ret)
1172                 dev_err(adev->dev, "ras inject %s failed %d\n",
1173                         get_ras_block_str(&info->head), ret);
1174
1175         return ret;
1176 }
1177
1178 /**
1179  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1180  * @adev: pointer to AMD GPU device
1181  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1182  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1183  * @query_info: pointer to ras_query_if
1184  *
1185  * Return 0 on query success or if there is nothing to do; otherwise return an
1186  * error on failure.
1187  */
1188 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1189                                                unsigned long *ce_count,
1190                                                unsigned long *ue_count,
1191                                                struct ras_query_if *query_info)
1192 {
1193         int ret;
1194
1195         if (!query_info)
1196                 /* do nothing if query_info is not specified */
1197                 return 0;
1198
1199         ret = amdgpu_ras_query_error_status(adev, query_info);
1200         if (ret)
1201                 return ret;
1202
1203         *ce_count += query_info->ce_count;
1204         *ue_count += query_info->ue_count;
1205
1206         /* some hardware/IPs support read-to-clear, so there is no need to
1207          * explicitly reset the err status after the query call */
1208         if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1209             adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1210                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1211                         dev_warn(adev->dev,
1212                                  "Failed to reset error counter and error status\n");
1213         }
1214
1215         return 0;
1216 }
1217
1218 /**
1219  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1220  * @adev: pointer to AMD GPU device
1221  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1222  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1223  * errors.
1224  * @query_info: pointer to ras_query_if if the query request is only for a
1225  * specific ip block; if info is NULL, then the query request is for
1226  * all the ip blocks that support querying ras error counters/status
1227  *
1228  * If @ce_count or @ue_count is set, count and return the corresponding
1229  * error counts in those integer pointers. Return 0 if the device
1230  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
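 *
 * A minimal in-kernel usage sketch (illustrative only; the helper below is
 * hypothetical and assumes a valid amdgpu_device pointer):
 *
 * .. code-block:: c
 *
 *      // hypothetical caller, not taken from this file
 *      static void example_log_ras_counts(struct amdgpu_device *adev)
 *      {
 *              unsigned long ce = 0, ue = 0;
 *
 *              // NULL query_info sums the counts over all RAS-capable IP blocks
 *              if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *                      dev_info(adev->dev, "ce=%lu ue=%lu\n", ce, ue);
 *      }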
1231  */
1232 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1233                                  unsigned long *ce_count,
1234                                  unsigned long *ue_count,
1235                                  struct ras_query_if *query_info)
1236 {
1237         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1238         struct ras_manager *obj;
1239         unsigned long ce, ue;
1240         int ret = 0;
1241
1242         if (!adev->ras_enabled || !con)
1243                 return -EOPNOTSUPP;
1244
1245         /* Don't count since no reporting.
1246          */
1247         if (!ce_count && !ue_count)
1248                 return 0;
1249
1250         ce = 0;
1251         ue = 0;
1252         if (!query_info) {
1253                 /* query all the ip blocks that support ras query interface */
1254                 list_for_each_entry(obj, &con->head, node) {
1255                         struct ras_query_if info = {
1256                                 .head = obj->head,
1257                         };
1258
1259                         ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1260                 }
1261         } else {
1262                 /* query specific ip block */
1263                 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1264         }
1265
1266         if (ret)
1267                 return ret;
1268
1269         if (ce_count)
1270                 *ce_count = ce;
1271
1272         if (ue_count)
1273                 *ue_count = ue;
1274
1275         return 0;
1276 }
1277 /* query/inject/cure end */
1278
1279
1280 /* sysfs begin */
1281
1282 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1283                 struct ras_badpage **bps, unsigned int *count);
1284
1285 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1286 {
1287         switch (flags) {
1288         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1289                 return "R";
1290         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1291                 return "P";
1292         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1293         default:
1294                 return "F";
1295         }
1296 }
1297
1298 /**
1299  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1300  *
1301  * It allows the user to read the bad pages of vram on the gpu through
1302  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1303  *
1304  * It outputs multiple lines, and each line stands for one gpu page.
1305  *
1306  * The format of one line is below,
1307  * gpu pfn : gpu page size : flags
1308  *
1309  * gpu pfn and gpu page size are printed in hex format.
1310  * flags can be one of the characters below,
1311  *
1312  * R: reserved, this gpu page is reserved and not able to be used.
1313  *
1314  * P: pending for reservation, this gpu page is marked as bad and will be
1315  * reserved in the next window of page_reserve.
1316  *
1317  * F: unable to reserve. this gpu page can't be reserved for some reason.
1318  *
1319  * Examples:
1320  *
1321  * .. code-block:: bash
1322  *
1323  *      0x00000001 : 0x00001000 : R
1324  *      0x00000002 : 0x00001000 : P
1325  *
1326  */
1327
1328 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1329                 struct kobject *kobj, struct bin_attribute *attr,
1330                 char *buf, loff_t ppos, size_t count)
1331 {
1332         struct amdgpu_ras *con =
1333                 container_of(attr, struct amdgpu_ras, badpages_attr);
1334         struct amdgpu_device *adev = con->adev;
1335         const unsigned int element_size =
1336                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1337         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1338         unsigned int end = div64_ul(ppos + count - 1, element_size);
1339         ssize_t s = 0;
1340         struct ras_badpage *bps = NULL;
1341         unsigned int bps_count = 0;
1342
1343         memset(buf, 0, count);
1344
1345         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1346                 return 0;
1347
1348         for (; start < end && start < bps_count; start++)
1349                 s += scnprintf(&buf[s], element_size + 1,
1350                                 "0x%08x : 0x%08x : %1s\n",
1351                                 bps[start].bp,
1352                                 bps[start].size,
1353                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1354
1355         kfree(bps);
1356
1357         return s;
1358 }
1359
1360 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1361                 struct device_attribute *attr, char *buf)
1362 {
1363         struct amdgpu_ras *con =
1364                 container_of(attr, struct amdgpu_ras, features_attr);
1365
1366         return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1367 }
1368
1369 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1370 {
1371         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1372
1373         sysfs_remove_file_from_group(&adev->dev->kobj,
1374                                 &con->badpages_attr.attr,
1375                                 RAS_FS_NAME);
1376 }
1377
1378 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1379 {
1380         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1381         struct attribute *attrs[] = {
1382                 &con->features_attr.attr,
1383                 NULL
1384         };
1385         struct attribute_group group = {
1386                 .name = RAS_FS_NAME,
1387                 .attrs = attrs,
1388         };
1389
1390         sysfs_remove_group(&adev->dev->kobj, &group);
1391
1392         return 0;
1393 }
1394
1395 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1396                 struct ras_common_if *head)
1397 {
1398         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1399
1400         if (!obj || obj->attr_inuse)
1401                 return -EINVAL;
1402
1403         get_obj(obj);
1404
1405         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1406                 "%s_err_count", head->name);
1407
1408         obj->sysfs_attr = (struct device_attribute){
1409                 .attr = {
1410                         .name = obj->fs_data.sysfs_name,
1411                         .mode = S_IRUGO,
1412                 },
1413                         .show = amdgpu_ras_sysfs_read,
1414         };
1415         sysfs_attr_init(&obj->sysfs_attr.attr);
1416
1417         if (sysfs_add_file_to_group(&adev->dev->kobj,
1418                                 &obj->sysfs_attr.attr,
1419                                 RAS_FS_NAME)) {
1420                 put_obj(obj);
1421                 return -EINVAL;
1422         }
1423
1424         obj->attr_inuse = 1;
1425
1426         return 0;
1427 }
1428
1429 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1430                 struct ras_common_if *head)
1431 {
1432         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1433
1434         if (!obj || !obj->attr_inuse)
1435                 return -EINVAL;
1436
1437         sysfs_remove_file_from_group(&adev->dev->kobj,
1438                                 &obj->sysfs_attr.attr,
1439                                 RAS_FS_NAME);
1440         obj->attr_inuse = 0;
1441         put_obj(obj);
1442
1443         return 0;
1444 }
1445
1446 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1447 {
1448         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1449         struct ras_manager *obj, *tmp;
1450
1451         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1452                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1453         }
1454
1455         if (amdgpu_bad_page_threshold != 0)
1456                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1457
1458         amdgpu_ras_sysfs_remove_feature_node(adev);
1459
1460         return 0;
1461 }
1462 /* sysfs end */
1463
1464 /**
1465  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1466  *
1467  * Normally when there is an uncorrectable error, the driver will reset
1468  * the GPU to recover.  However, for an unrecoverable error, the driver
1469  * provides an interface to reboot the system automatically
1470  * instead.
1471  *
1472  * The following file in debugfs provides that interface:
1473  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1474  *
1475  * Usage:
1476  *
1477  * .. code-block:: bash
1478  *
1479  *      echo true > .../ras/auto_reboot
1480  *
1481  */
1482 /* debugfs begin */
1483 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1484 {
1485         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1486         struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1487         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1488         struct dentry     *dir;
1489
1490         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1491         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1492                             &amdgpu_ras_debugfs_ctrl_ops);
1493         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1494                             &amdgpu_ras_debugfs_eeprom_ops);
1495         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1496                            &con->bad_page_cnt_threshold);
1497         debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1498         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1499         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1500         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1501                             &amdgpu_ras_debugfs_eeprom_size_ops);
1502         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1503                                                        S_IRUGO, dir, adev,
1504                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1505         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1506
1507         /*
1508          * After an uncorrectable error happens, GPU recovery is usually
1509          * scheduled. But since GPU recovery is known to sometimes fail to
1510          * bring the GPU back, the interface below gives the user a direct
1511          * way to reboot the system automatically when an
1512          * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
1513          * routine will never be called in that case.
1514          */
1515         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1516
1517         /*
1518          * The user can set this to skip cleaning up the hardware error count
1519          * registers of RAS IPs during ras recovery.
1520          */
1521         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1522                             &con->disable_ras_err_cnt_harvest);
1523         return dir;
1524 }
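
/*
 * Usage sketch for the read-only nodes created above (the debugfs mount
 * point and DRI minor index vary per system):
 *
 *      cat /sys/kernel/debug/dri/0/ras/bad_page_cnt_threshold
 *      cat /sys/kernel/debug/dri/0/ras/ras_hw_enabled
 *      cat /sys/kernel/debug/dri/0/ras/ras_enabled
 */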
1525
1526 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1527                                       struct ras_fs_if *head,
1528                                       struct dentry *dir)
1529 {
1530         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1531
1532         if (!obj || !dir)
1533                 return;
1534
1535         get_obj(obj);
1536
1537         memcpy(obj->fs_data.debugfs_name,
1538                         head->debugfs_name,
1539                         sizeof(obj->fs_data.debugfs_name));
1540
1541         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1542                             obj, &amdgpu_ras_debugfs_ops);
1543 }
1544
1545 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1546 {
1547         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1548         struct dentry *dir;
1549         struct ras_manager *obj;
1550         struct ras_fs_if fs_info;
1551
1552         /*
1553          * this is not called in the resume path, so there is no need to
1554          * check the suspend and gpu reset status
1555          */
1556         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1557                 return;
1558
1559         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1560
1561         list_for_each_entry(obj, &con->head, node) {
1562                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1563                         (obj->attr_inuse == 1)) {
1564                         sprintf(fs_info.debugfs_name, "%s_err_inject",
1565                                         get_ras_block_str(&obj->head));
1566                         fs_info.head = obj->head;
1567                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1568                 }
1569         }
1570 }
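
/*
 * Illustrative example: for each supported block with a sysfs node in use,
 * an "<block>_err_inject" file is created in the same debugfs directory,
 * e.g. /sys/kernel/debug/dri/0/ras/umc_err_inject on a typical system.
 * Writes to it are handled by amdgpu_ras_debugfs_ops.
 */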
1571
1572 /* debugfs end */
1573
1574 /* ras fs */
1575 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1576                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1577 static DEVICE_ATTR(features, S_IRUGO,
1578                 amdgpu_ras_sysfs_features_read, NULL);
1579 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1580 {
1581         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1582         struct attribute_group group = {
1583                 .name = RAS_FS_NAME,
1584         };
1585         struct attribute *attrs[] = {
1586                 &con->features_attr.attr,
1587                 NULL
1588         };
1589         struct bin_attribute *bin_attrs[] = {
1590                 NULL,
1591                 NULL,
1592         };
1593         int r;
1594
1595         /* add features entry */
1596         con->features_attr = dev_attr_features;
1597         group.attrs = attrs;
1598         sysfs_attr_init(attrs[0]);
1599
1600         if (amdgpu_bad_page_threshold != 0) {
1601                 /* add bad_page_features entry */
1602                 bin_attr_gpu_vram_bad_pages.private = NULL;
1603                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1604                 bin_attrs[0] = &con->badpages_attr;
1605                 group.bin_attrs = bin_attrs;
1606                 sysfs_bin_attr_init(bin_attrs[0]);
1607         }
1608
1609         r = sysfs_create_group(&adev->dev->kobj, &group);
1610         if (r)
1611                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1612
1613         return 0;
1614 }
1615
1616 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1617 {
1618         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1619         struct ras_manager *con_obj, *ip_obj, *tmp;
1620
1621         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1622                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1623                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1624                         if (ip_obj)
1625                                 put_obj(ip_obj);
1626                 }
1627         }
1628
1629         amdgpu_ras_sysfs_remove_all(adev);
1630         return 0;
1631 }
1632 /* ras fs end */
1633
1634 /* ih begin */
1635
1636 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
1637  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
1638  * register to check whether the interrupt has been triggered, and properly
1639  * ack the interrupt if it is there
1640  */
1641 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1642 {
1643         /* Fatal error events are handled on host side */
1644         if (amdgpu_sriov_vf(adev))
1645                 return;
1646
1647         if (adev->nbio.ras &&
1648             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1649                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1650
1651         if (adev->nbio.ras &&
1652             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1653                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1654 }
1655
1656 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1657                                 struct amdgpu_iv_entry *entry)
1658 {
1659         bool poison_stat = false;
1660         struct amdgpu_device *adev = obj->adev;
1661         struct amdgpu_ras_block_object *block_obj =
1662                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1663
1664         if (!block_obj)
1665                 return;
1666
1667         /* both query_poison_status and handle_poison_consumption are optional,
1668          * but at least one of them should be implemented if we need a poison
1669          * consumption handler
1670          */
1671         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1672                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1673                 if (!poison_stat) {
1674                         /* Not poison consumption interrupt, no need to handle it */
1675                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1676                                         block_obj->ras_comm.name);
1677
1678                         return;
1679                 }
1680         }
1681
1682         amdgpu_umc_poison_handler(adev, false);
1683
1684         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1685                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1686
1687         /* gpu reset is fallback for failed and default cases */
1688         if (poison_stat) {
1689                 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1690                                 block_obj->ras_comm.name);
1691                 amdgpu_ras_reset_gpu(adev);
1692         } else {
1693                 amdgpu_gfx_poison_consumption_handler(adev, entry);
1694         }
1695 }
1696
1697 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1698                                 struct amdgpu_iv_entry *entry)
1699 {
1700         dev_info(obj->adev->dev,
1701                 "Poison is created, no user action is needed.\n");
1702 }
1703
1704 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1705                                 struct amdgpu_iv_entry *entry)
1706 {
1707         struct ras_ih_data *data = &obj->ih_data;
1708         struct ras_err_data err_data = {0, 0, 0, NULL};
1709         int ret;
1710
1711         if (!data->cb)
1712                 return;
1713
1714         /* Let the IP handle its data; we may need to get the output
1715          * from the callback to update the error type/count, etc.
1716          */
1717         ret = data->cb(obj->adev, &err_data, entry);
1718         /* a ue will trigger an interrupt, and in that case
1719          * we need to do a reset to recover the whole system.
1720          * But leave the IP to do that recovery; here we just dispatch
1721          * the error.
1722          */
1723         if (ret == AMDGPU_RAS_SUCCESS) {
1724                 /* these counts could be left as 0 if
1725                  * some blocks do not count the error number
1726                  */
1727                 obj->err_data.ue_count += err_data.ue_count;
1728                 obj->err_data.ce_count += err_data.ce_count;
1729         }
1730 }
1731
1732 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1733 {
1734         struct ras_ih_data *data = &obj->ih_data;
1735         struct amdgpu_iv_entry entry;
1736
1737         while (data->rptr != data->wptr) {
1738                 rmb();
1739                 memcpy(&entry, &data->ring[data->rptr],
1740                                 data->element_size);
1741
1742                 wmb();
1743                 data->rptr = (data->aligned_element_size +
1744                                 data->rptr) % data->ring_size;
1745
1746                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1747                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1748                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1749                         else
1750                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1751                 } else {
1752                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1753                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1754                         else
1755                                 dev_warn(obj->adev->dev,
1756                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1757                 }
1758         }
1759 }
1760
1761 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1762 {
1763         struct ras_ih_data *data =
1764                 container_of(work, struct ras_ih_data, ih_work);
1765         struct ras_manager *obj =
1766                 container_of(data, struct ras_manager, ih_data);
1767
1768         amdgpu_ras_interrupt_handler(obj);
1769 }
1770
1771 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1772                 struct ras_dispatch_if *info)
1773 {
1774         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1775         struct ras_ih_data *data;
1776
1777         if (!obj)
1778                 return -EINVAL;
1779         data = &obj->ih_data;
1780         if (data->inuse == 0)
1781                 return 0;
1782
1783         /* Might overflow: the write at wptr is not checked against rptr... */
1784         memcpy(&data->ring[data->wptr], info->entry,
1785                         data->element_size);
1786
1787         wmb();
1788         data->wptr = (data->aligned_element_size +
1789                         data->wptr) % data->ring_size;
1790
1791         schedule_work(&data->ih_work);
1792
1793         return 0;
1794 }
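
/*
 * Note on the ring used above: amdgpu_ras_interrupt_dispatch() is the
 * producer (it copies the IV entry in at wptr and advances wptr), while
 * amdgpu_ras_interrupt_handler() is the consumer (it copies entries out at
 * rptr and advances rptr), with both indices wrapping modulo ring_size.
 */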
1795
1796 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1797                 struct ras_common_if *head)
1798 {
1799         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1800         struct ras_ih_data *data;
1801
1802         if (!obj)
1803                 return -EINVAL;
1804
1805         data = &obj->ih_data;
1806         if (data->inuse == 0)
1807                 return 0;
1808
1809         cancel_work_sync(&data->ih_work);
1810
1811         kfree(data->ring);
1812         memset(data, 0, sizeof(*data));
1813         put_obj(obj);
1814
1815         return 0;
1816 }
1817
1818 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1819                 struct ras_common_if *head)
1820 {
1821         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1822         struct ras_ih_data *data;
1823         struct amdgpu_ras_block_object *ras_obj;
1824
1825         if (!obj) {
1826                 /* in case we register the IH before enabling the ras feature */
1827                 obj = amdgpu_ras_create_obj(adev, head);
1828                 if (!obj)
1829                         return -EINVAL;
1830         } else
1831                 get_obj(obj);
1832
1833         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1834
1835         data = &obj->ih_data;
1836         /* add the callback, etc. */
1837         *data = (struct ras_ih_data) {
1838                 .inuse = 0,
1839                 .cb = ras_obj->ras_cb,
1840                 .element_size = sizeof(struct amdgpu_iv_entry),
1841                 .rptr = 0,
1842                 .wptr = 0,
1843         };
1844
1845         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1846
1847         data->aligned_element_size = ALIGN(data->element_size, 8);
1848         /* the ring can store 64 iv entries. */
1849         data->ring_size = 64 * data->aligned_element_size;
1850         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1851         if (!data->ring) {
1852                 put_obj(obj);
1853                 return -ENOMEM;
1854         }
1855
1856         /* IH is ready */
1857         data->inuse = 1;
1858
1859         return 0;
1860 }
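
/*
 * Sizing sketch for the ring allocated above (the exact value of
 * sizeof(struct amdgpu_iv_entry) is only assumed here for illustration):
 * if the entry were 52 bytes, aligned_element_size would be
 * ALIGN(52, 8) = 56 and ring_size would be 64 * 56 = 3584 bytes.
 */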
1861
1862 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1863 {
1864         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1865         struct ras_manager *obj, *tmp;
1866
1867         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1868                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1869         }
1870
1871         return 0;
1872 }
1873 /* ih end */
1874
1875 /* traverse all IPs except NBIO to query error counters */
1876 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1877 {
1878         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1879         struct ras_manager *obj;
1880
1881         if (!adev->ras_enabled || !con)
1882                 return;
1883
1884         list_for_each_entry(obj, &con->head, node) {
1885                 struct ras_query_if info = {
1886                         .head = obj->head,
1887                 };
1888
1889                 /*
1890                  * The PCIE_BIF IP has a separate isr driven by the ras
1891                  * controller interrupt, and the specific ras counter query
1892                  * is done in that isr. So skip such a block in the common
1893                  * sync flood interrupt isr.
1894                  */
1895                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1896                         continue;
1897
1898                 /*
1899                  * this is a workaround for aldebaran: skip sending the msg to
1900                  * smu to get the ecc_info table because smu temporarily fails
1901                  * to handle that request.
1902                  * should be removed once smu can handle the ecc_info table.
1903                  */
1904                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1905                         (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1906                         continue;
1907
1908                 amdgpu_ras_query_error_status(adev, &info);
1909
1910                 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1911                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1912                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1913                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
1914                                 dev_warn(adev->dev, "Failed to reset error counter and error status");
1915                 }
1916         }
1917 }
1918
1919 /* Parse RdRspStatus and WrRspStatus */
1920 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1921                                           struct ras_query_if *info)
1922 {
1923         struct amdgpu_ras_block_object *block_obj;
1924         /*
1925          * Only two blocks need to query the read/write
1926          * RspStatus at the current state
1927          */
1928         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1929                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1930                 return;
1931
1932         block_obj = amdgpu_ras_get_ras_block(adev,
1933                                         info->head.block,
1934                                         info->head.sub_block_index);
1935
1936         if (!block_obj || !block_obj->hw_ops) {
1937                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1938                              get_ras_block_str(&info->head));
1939                 return;
1940         }
1941
1942         if (block_obj->hw_ops->query_ras_error_status)
1943                 block_obj->hw_ops->query_ras_error_status(adev);
1944
1945 }
1946
1947 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1948 {
1949         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1950         struct ras_manager *obj;
1951
1952         if (!adev->ras_enabled || !con)
1953                 return;
1954
1955         list_for_each_entry(obj, &con->head, node) {
1956                 struct ras_query_if info = {
1957                         .head = obj->head,
1958                 };
1959
1960                 amdgpu_ras_error_status_query(adev, &info);
1961         }
1962 }
1963
1964 /* recovery begin */
1965
1966 /* return 0 on success.
1967  * the caller needs to free bps.
1968  */
1969 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1970                 struct ras_badpage **bps, unsigned int *count)
1971 {
1972         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1973         struct ras_err_handler_data *data;
1974         int i = 0;
1975         int ret = 0, status;
1976
1977         if (!con || !con->eh_data || !bps || !count)
1978                 return -EINVAL;
1979
1980         mutex_lock(&con->recovery_lock);
1981         data = con->eh_data;
1982         if (!data || data->count == 0) {
1983                 *bps = NULL;
1984                 ret = -EINVAL;
1985                 goto out;
1986         }
1987
1988         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1989         if (!*bps) {
1990                 ret = -ENOMEM;
1991                 goto out;
1992         }
1993
1994         for (; i < data->count; i++) {
1995                 (*bps)[i] = (struct ras_badpage){
1996                         .bp = data->bps[i].retired_page,
1997                         .size = AMDGPU_GPU_PAGE_SIZE,
1998                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1999                 };
2000                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2001                                 data->bps[i].retired_page);
2002                 if (status == -EBUSY)
2003                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2004                 else if (status == -ENOENT)
2005                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2006         }
2007
2008         *count = data->count;
2009 out:
2010         mutex_unlock(&con->recovery_lock);
2011         return ret;
2012 }
2013
2014 static void amdgpu_ras_do_recovery(struct work_struct *work)
2015 {
2016         struct amdgpu_ras *ras =
2017                 container_of(work, struct amdgpu_ras, recovery_work);
2018         struct amdgpu_device *remote_adev = NULL;
2019         struct amdgpu_device *adev = ras->adev;
2020         struct list_head device_list, *device_list_handle =  NULL;
2021
2022         if (!ras->disable_ras_err_cnt_harvest) {
2023                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2024
2025                 /* Build list of devices to query RAS related errors */
2026                 if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2027                         device_list_handle = &hive->device_list;
2028                 } else {
2029                         INIT_LIST_HEAD(&device_list);
2030                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
2031                         device_list_handle = &device_list;
2032                 }
2033
2034                 list_for_each_entry(remote_adev,
2035                                 device_list_handle, gmc.xgmi.head) {
2036                         amdgpu_ras_query_err_status(remote_adev);
2037                         amdgpu_ras_log_on_err_counter(remote_adev);
2038                 }
2039
2040                 amdgpu_put_xgmi_hive(hive);
2041         }
2042
2043         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2044                 struct amdgpu_reset_context reset_context;
2045                 memset(&reset_context, 0, sizeof(reset_context));
2046
2047                 reset_context.method = AMD_RESET_METHOD_NONE;
2048                 reset_context.reset_req_dev = adev;
2049
2050                 /* Perform full reset in fatal error mode */
2051                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2052                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2053                 else {
2054                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2055
2056                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2057                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2058                                 reset_context.method = AMD_RESET_METHOD_MODE2;
2059                         }
2060
2061                         /* A fatal error occurred in poison mode; mode1 reset is used to
2062                          * recover the gpu.
2063                          */
2064                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2065                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2066                                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2067
2068                                 psp_fatal_error_recovery_quirk(&adev->psp);
2069                         }
2070                 }
2071
2072                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2073         }
2074         atomic_set(&ras->in_recovery, 0);
2075 }
2076
2077 /* alloc/realloc bps array */
2078 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2079                 struct ras_err_handler_data *data, int pages)
2080 {
2081         unsigned int old_space = data->count + data->space_left;
2082         unsigned int new_space = old_space + pages;
2083         unsigned int align_space = ALIGN(new_space, 512);
2084         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2085
2086         if (!bps) {
2087                 return -ENOMEM;
2088         }
2089
2090         if (data->bps) {
2091                 memcpy(bps, data->bps,
2092                                 data->count * sizeof(*data->bps));
2093                 kfree(data->bps);
2094         }
2095
2096         data->bps = bps;
2097         data->space_left += align_space - old_space;
2098         return 0;
2099 }
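
/*
 * Growth sketch for the reallocation above (illustrative numbers): with
 * count = 0, space_left = 0 and pages = 256, old_space is 0, new_space is
 * 256 and align_space is ALIGN(256, 512) = 512, so space_left becomes 512
 * entries after the copy.
 */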
2100
2101 /* it deals with vram only. */
2102 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2103                 struct eeprom_table_record *bps, int pages)
2104 {
2105         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2106         struct ras_err_handler_data *data;
2107         int ret = 0;
2108         uint32_t i;
2109
2110         if (!con || !con->eh_data || !bps || pages <= 0)
2111                 return 0;
2112
2113         mutex_lock(&con->recovery_lock);
2114         data = con->eh_data;
2115         if (!data)
2116                 goto out;
2117
2118         for (i = 0; i < pages; i++) {
2119                 if (amdgpu_ras_check_bad_page_unlock(con,
2120                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2121                         continue;
2122
2123                 if (!data->space_left &&
2124                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2125                         ret = -ENOMEM;
2126                         goto out;
2127                 }
2128
2129                 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2130                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2131                         AMDGPU_GPU_PAGE_SIZE);
2132
2133                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2134                 data->count++;
2135                 data->space_left--;
2136         }
2137 out:
2138         mutex_unlock(&con->recovery_lock);
2139
2140         return ret;
2141 }
2142
2143 /*
2144  * write the error record array to eeprom; the function should be
2145  * protected by recovery_lock
2146  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2147  */
2148 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2149                 unsigned long *new_cnt)
2150 {
2151         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2152         struct ras_err_handler_data *data;
2153         struct amdgpu_ras_eeprom_control *control;
2154         int save_count;
2155
2156         if (!con || !con->eh_data) {
2157                 if (new_cnt)
2158                         *new_cnt = 0;
2159
2160                 return 0;
2161         }
2162
2163         mutex_lock(&con->recovery_lock);
2164         control = &con->eeprom_control;
2165         data = con->eh_data;
2166         save_count = data->count - control->ras_num_recs;
2167         mutex_unlock(&con->recovery_lock);
2168
2169         if (new_cnt)
2170                 *new_cnt = save_count / adev->umc.retire_unit;
2171
2172         /* only new entries are saved */
2173         if (save_count > 0) {
2174                 if (amdgpu_ras_eeprom_append(control,
2175                                              &data->bps[control->ras_num_recs],
2176                                              save_count)) {
2177                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2178                         return -EIO;
2179                 }
2180
2181                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2182         }
2183
2184         return 0;
2185 }
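
/*
 * Bookkeeping sketch for the save above (numbers are illustrative): if
 * eh_data holds 12 records and the eeprom already has ras_num_recs = 4,
 * save_count is 8 and only those 8 new records are appended; with a
 * hypothetical retire_unit of 2, *new_cnt would be reported as 4.
 */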
2186
2187 /*
2188  * read error record array in eeprom and reserve enough space for
2189  * storing new bad pages
2190  */
2191 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2192 {
2193         struct amdgpu_ras_eeprom_control *control =
2194                 &adev->psp.ras_context.ras->eeprom_control;
2195         struct eeprom_table_record *bps;
2196         int ret;
2197
2198         /* no bad page record, skip eeprom access */
2199         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2200                 return 0;
2201
2202         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2203         if (!bps)
2204                 return -ENOMEM;
2205
2206         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2207         if (ret)
2208                 dev_err(adev->dev, "Failed to load EEPROM table records!");
2209         else
2210                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2211
2212         kfree(bps);
2213         return ret;
2214 }
2215
2216 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2217                                 uint64_t addr)
2218 {
2219         struct ras_err_handler_data *data = con->eh_data;
2220         int i;
2221
2222         addr >>= AMDGPU_GPU_PAGE_SHIFT;
2223         for (i = 0; i < data->count; i++)
2224                 if (addr == data->bps[i].retired_page)
2225                         return true;
2226
2227         return false;
2228 }
2229
2230 /*
2231  * check whether an address belongs to a bad page
2232  *
2233  * Note: this check is only for the umc block
2234  */
2235 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2236                                 uint64_t addr)
2237 {
2238         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2239         bool ret = false;
2240
2241         if (!con || !con->eh_data)
2242                 return ret;
2243
2244         mutex_lock(&con->recovery_lock);
2245         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2246         mutex_unlock(&con->recovery_lock);
2247         return ret;
2248 }
2249
2250 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2251                                           uint32_t max_count)
2252 {
2253         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2254
2255         /*
2256          * Justification of value bad_page_cnt_threshold in ras structure
2257          *
2258          * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2259          * in eeprom, or amdgpu_bad_page_threshold == -2, which gives the
2260          * two scenarios below.
2261          *
2262          * Bad page retirement enablement:
2263          *    - If amdgpu_bad_page_threshold = -2,
2264          *      bad_page_cnt_threshold = typical value by formula.
2265          *
2266          *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2267          *      max record length in eeprom, use it directly.
2268          *
2269          * Bad page retirement disablement:
2270          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2271          *      functionality is disabled, and bad_page_cnt_threshold will
2272          *      take no effect.
2273          */
2274
2275         if (amdgpu_bad_page_threshold < 0) {
2276                 u64 val = adev->gmc.mc_vram_size;
2277
2278                 do_div(val, RAS_BAD_PAGE_COVER);
2279                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2280                                                   max_count);
2281         } else {
2282                 con->bad_page_cnt_threshold = min_t(int, max_count,
2283                                                     amdgpu_bad_page_threshold);
2284         }
2285 }
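
/*
 * Threshold sketch (assuming, for illustration only, a RAS_BAD_PAGE_COVER
 * of 100 MiB): with amdgpu_bad_page_threshold = -2 and 16 GiB of VRAM, the
 * division yields roughly 163, so bad_page_cnt_threshold becomes
 * min(163, max_count); a positive module parameter value is used directly,
 * clamped to max_count.
 */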
2286
2287 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2288 {
2289         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2290         struct ras_err_handler_data **data;
2291         u32  max_eeprom_records_count = 0;
2292         bool exc_err_limit = false;
2293         int ret;
2294
2295         if (!con || amdgpu_sriov_vf(adev))
2296                 return 0;
2297
2298         /* Allow access to RAS EEPROM via debugfs, when the ASIC
2299          * supports RAS and debugfs is enabled, even when
2300          * adev->ras_enabled is unset, i.e. when "ras_enable"
2301          * module parameter is set to 0.
2302          */
2303         con->adev = adev;
2304
2305         if (!adev->ras_enabled)
2306                 return 0;
2307
2308         data = &con->eh_data;
2309         *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2310         if (!*data) {
2311                 ret = -ENOMEM;
2312                 goto out;
2313         }
2314
2315         mutex_init(&con->recovery_lock);
2316         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2317         atomic_set(&con->in_recovery, 0);
2318         con->eeprom_control.bad_channel_bitmap = 0;
2319
2320         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2321         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2322
2323         /* Todo: During testing the SMU might fail to read the eeprom through I2C
2324          * when the GPU is pending an XGMI reset during probe time
2325          * (mostly after the second bus reset), so skip it for now
2326          */
2327         if (adev->gmc.xgmi.pending_reset)
2328                 return 0;
2329         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2330         /*
2331          * This call fails when exc_err_limit is true or
2332          * ret != 0.
2333          */
2334         if (exc_err_limit || ret)
2335                 goto free;
2336
2337         if (con->eeprom_control.ras_num_recs) {
2338                 ret = amdgpu_ras_load_bad_pages(adev);
2339                 if (ret)
2340                         goto free;
2341
2342                 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2343
2344                 if (con->update_channel_flag == true) {
2345                         amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2346                         con->update_channel_flag = false;
2347                 }
2348         }
2349
2350 #ifdef CONFIG_X86_MCE_AMD
2351         if ((adev->asic_type == CHIP_ALDEBARAN) &&
2352             (adev->gmc.xgmi.connected_to_cpu))
2353                 amdgpu_register_bad_pages_mca_notifier(adev);
2354 #endif
2355         return 0;
2356
2357 free:
2358         kfree((*data)->bps);
2359         kfree(*data);
2360         con->eh_data = NULL;
2361 out:
2362         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2363
2364         /*
2365          * Except error threshold exceeding case, other failure cases in this
2366          * function would not fail amdgpu driver init.
2367          */
2368         if (!exc_err_limit)
2369                 ret = 0;
2370         else
2371                 ret = -EINVAL;
2372
2373         return ret;
2374 }
2375
2376 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2377 {
2378         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2379         struct ras_err_handler_data *data = con->eh_data;
2380
2381         /* recovery_init did not initialize it, so fini has nothing to do */
2382         if (!data)
2383                 return 0;
2384
2385         cancel_work_sync(&con->recovery_work);
2386
2387         mutex_lock(&con->recovery_lock);
2388         con->eh_data = NULL;
2389         kfree(data->bps);
2390         kfree(data);
2391         mutex_unlock(&con->recovery_lock);
2392
2393         return 0;
2394 }
2395 /* recovery end */
2396
2397 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2398 {
2399         if (amdgpu_sriov_vf(adev)) {
2400                 switch (adev->ip_versions[MP0_HWIP][0]) {
2401                 case IP_VERSION(13, 0, 2):
2402                 case IP_VERSION(13, 0, 6):
2403                         return true;
2404                 default:
2405                         return false;
2406                 }
2407         }
2408
2409         if (adev->asic_type == CHIP_IP_DISCOVERY) {
2410                 switch (adev->ip_versions[MP0_HWIP][0]) {
2411                 case IP_VERSION(13, 0, 0):
2412                 case IP_VERSION(13, 0, 6):
2413                 case IP_VERSION(13, 0, 10):
2414                         return true;
2415                 default:
2416                         return false;
2417                 }
2418         }
2419
2420         return adev->asic_type == CHIP_VEGA10 ||
2421                 adev->asic_type == CHIP_VEGA20 ||
2422                 adev->asic_type == CHIP_ARCTURUS ||
2423                 adev->asic_type == CHIP_ALDEBARAN ||
2424                 adev->asic_type == CHIP_SIENNA_CICHLID;
2425 }
2426
2427 /*
2428  * this is a workaround for the vega20 workstation sku:
2429  * force enable gfx ras and ignore the vbios gfx ras flag
2430  * because GC EDC can not be written
2431  */
2432 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2433 {
2434         struct atom_context *ctx = adev->mode_info.atom_context;
2435
2436         if (!ctx)
2437                 return;
2438
2439         if (strnstr(ctx->vbios_pn, "D16406",
2440                     sizeof(ctx->vbios_pn)) ||
2441                 strnstr(ctx->vbios_pn, "D36002",
2442                         sizeof(ctx->vbios_pn)))
2443                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2444 }
2445
2446 /*
2447  * check the hardware's ras ability, which will be saved in hw_supported.
2448  * if the hardware does not support ras, we can skip some ras initialization
2449  * and forbid some ras operations from the IPs.
2450  * if software itself, say a boot parameter, limits the ras ability, we still
2451  * need to allow the IPs to do some limited operations, like disable. In such
2452  * a case, we have to initialize ras as normal, but need to check whether the
2453  * operation is allowed or not in each function.
2454  */
2455 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2456 {
2457         adev->ras_hw_enabled = adev->ras_enabled = 0;
2458
2459         if (!amdgpu_ras_asic_supported(adev))
2460                 return;
2461
2462         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
2463                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2464                         dev_info(adev->dev, "MEM ECC is active.\n");
2465                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2466                                                    1 << AMDGPU_RAS_BLOCK__DF);
2467                 } else {
2468                         dev_info(adev->dev, "MEM ECC is not presented.\n");
2469                 }
2470
2471                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2472                         dev_info(adev->dev, "SRAM ECC is active.\n");
2473                         if (!amdgpu_sriov_vf(adev))
2474                                 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2475                                                             1 << AMDGPU_RAS_BLOCK__DF);
2476                         else
2477                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2478                                                                 1 << AMDGPU_RAS_BLOCK__SDMA |
2479                                                                 1 << AMDGPU_RAS_BLOCK__GFX);
2480
2481                         /* VCN/JPEG RAS can be supported on both bare metal and
2482                          * SRIOV environment
2483                          */
2484                         if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2485                             adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2486                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2487                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2488                         else
2489                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2490                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2491
2492                         /*
2493                          * XGMI RAS is not supported if xgmi num physical nodes
2494                          * is zero
2495                          */
2496                         if (!adev->gmc.xgmi.num_physical_nodes)
2497                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2498                 } else {
2499                         dev_info(adev->dev, "SRAM ECC is not presented.\n");
2500                 }
2501         } else {
2502                 /* the driver only manages the RAS feature of a few IP blocks
2503                  * when the GPU is connected to the CPU through XGMI */
2504                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2505                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2506                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2507         }
2508
2509         amdgpu_ras_get_quirks(adev);
2510
2511         /* hw_supported needs to be aligned with RAS block mask. */
2512         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2513
2514
2515         /*
2516          * Disable ras feature for aqua vanjaram
2517          * by default on apu platform.
2518          */
2519         if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&
2520             adev->gmc.is_app_apu)
2521                 adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
2522                         adev->ras_hw_enabled & amdgpu_ras_mask;
2523         else
2524                 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2525                         adev->ras_hw_enabled & amdgpu_ras_mask;
2526 }
2527
2528 static void amdgpu_ras_counte_dw(struct work_struct *work)
2529 {
2530         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2531                                               ras_counte_delay_work.work);
2532         struct amdgpu_device *adev = con->adev;
2533         struct drm_device *dev = adev_to_drm(adev);
2534         unsigned long ce_count, ue_count;
2535         int res;
2536
2537         res = pm_runtime_get_sync(dev->dev);
2538         if (res < 0)
2539                 goto Out;
2540
2541         /* Cache new values.
2542          */
2543         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2544                 atomic_set(&con->ras_ce_count, ce_count);
2545                 atomic_set(&con->ras_ue_count, ue_count);
2546         }
2547
2548         pm_runtime_mark_last_busy(dev->dev);
2549 Out:
2550         pm_runtime_put_autosuspend(dev->dev);
2551 }
2552
2553 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2554 {
2555         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2556         bool df_poison, umc_poison;
2557
2558         /* poison setting is useless on SRIOV guest */
2559         if (amdgpu_sriov_vf(adev) || !con)
2560                 return;
2561
2562         /* Init poison supported flag, the default value is false */
2563         if (adev->gmc.xgmi.connected_to_cpu) {
2564                 /* enabled by default when GPU is connected to CPU */
2565                 con->poison_supported = true;
2566         } else if (adev->df.funcs &&
2567             adev->df.funcs->query_ras_poison_mode &&
2568             adev->umc.ras &&
2569             adev->umc.ras->query_ras_poison_mode) {
2570                 df_poison =
2571                         adev->df.funcs->query_ras_poison_mode(adev);
2572                 umc_poison =
2573                         adev->umc.ras->query_ras_poison_mode(adev);
2574
2575                 /* Only if poison is set in both DF and UMC can we support it */
2576                 if (df_poison && umc_poison)
2577                         con->poison_supported = true;
2578                 else if (df_poison != umc_poison)
2579                         dev_warn(adev->dev,
2580                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2581                                 df_poison, umc_poison);
2582         }
2583 }
2584
2585 int amdgpu_ras_init(struct amdgpu_device *adev)
2586 {
2587         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2588         int r;
2589
2590         if (con)
2591                 return 0;
2592
2593         con = kmalloc(sizeof(struct amdgpu_ras) +
2594                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2595                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2596                         GFP_KERNEL|__GFP_ZERO);
2597         if (!con)
2598                 return -ENOMEM;
2599
2600         con->adev = adev;
2601         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2602         atomic_set(&con->ras_ce_count, 0);
2603         atomic_set(&con->ras_ue_count, 0);
2604
2605         con->objs = (struct ras_manager *)(con + 1);
2606
2607         amdgpu_ras_set_context(adev, con);
2608
2609         amdgpu_ras_check_supported(adev);
2610
2611         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2612                 /* set the gfx block ras context feature for VEGA20 Gaming,
2613                  * to send a ras disable cmd to the ras ta during ras late init.
2614                  */
2615                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2616                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2617
2618                         return 0;
2619                 }
2620
2621                 r = 0;
2622                 goto release_con;
2623         }
2624
2625         con->update_channel_flag = false;
2626         con->features = 0;
2627         INIT_LIST_HEAD(&con->head);
2628         /* Might need to get this flag from vbios. */
2629         con->flags = RAS_DEFAULT_FLAGS;
2630
2631         /* initialize nbio ras function ahead of any other
2632          * ras functions so hardware fatal error interrupt
2633          * can be enabled as early as possible */
2634         switch (adev->ip_versions[NBIO_HWIP][0]) {
2635         case IP_VERSION(7, 4, 0):
2636         case IP_VERSION(7, 4, 1):
2637         case IP_VERSION(7, 4, 4):
2638                 if (!adev->gmc.xgmi.connected_to_cpu)
2639                         adev->nbio.ras = &nbio_v7_4_ras;
2640                 break;
2641         case IP_VERSION(4, 3, 0):
2642                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2643                         /* unlike other generations of nbio ras,
2644                          * nbio v4_3 only supports the fatal error interrupt
2645                          * to inform software that DF is frozen due to a
2646                          * system fatal error event. the driver should not
2647                          * enable nbio ras in such a case. Instead,
2648                          * check DF RAS */
2649                         adev->nbio.ras = &nbio_v4_3_ras;
2650                 break;
2651         case IP_VERSION(7, 9, 0):
2652                 if (!adev->gmc.is_app_apu)
2653                         adev->nbio.ras = &nbio_v7_9_ras;
2654                 break;
2655         default:
2656                 /* nbio ras is not available */
2657                 break;
2658         }
2659
2660         /* nbio ras block needs to be enabled ahead of other ras blocks
2661          * to handle fatal error */
2662         r = amdgpu_nbio_ras_sw_init(adev);
2663         if (r)
2664                 return r;
2665
2666         if (adev->nbio.ras &&
2667             adev->nbio.ras->init_ras_controller_interrupt) {
2668                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2669                 if (r)
2670                         goto release_con;
2671         }
2672
2673         if (adev->nbio.ras &&
2674             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2675                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2676                 if (r)
2677                         goto release_con;
2678         }
2679
2680         amdgpu_ras_query_poison_mode(adev);
2681
2682         if (amdgpu_ras_fs_init(adev)) {
2683                 r = -EINVAL;
2684                 goto release_con;
2685         }
2686
2687         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2688                  "hardware ability[%x] ras_mask[%x]\n",
2689                  adev->ras_hw_enabled, adev->ras_enabled);
2690
2691         return 0;
2692 release_con:
2693         amdgpu_ras_set_context(adev, NULL);
2694         kfree(con);
2695
2696         return r;
2697 }
2698
2699 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2700 {
2701         if (adev->gmc.xgmi.connected_to_cpu ||
2702             adev->gmc.is_app_apu)
2703                 return 1;
2704         return 0;
2705 }
2706
2707 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2708                                         struct ras_common_if *ras_block)
2709 {
2710         struct ras_query_if info = {
2711                 .head = *ras_block,
2712         };
2713
2714         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2715                 return 0;
2716
2717         if (amdgpu_ras_query_error_status(adev, &info) != 0)
2718                 DRM_WARN("RAS init harvest failure");
2719
2720         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2721                 DRM_WARN("RAS init harvest reset failure");
2722
2723         return 0;
2724 }
2725
2726 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2727 {
2728        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2729
2730        if (!con)
2731                return false;
2732
2733        return con->poison_supported;
2734 }
2735
2736 /* helper function to handle common stuff in ip late init phase */
2737 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2738                          struct ras_common_if *ras_block)
2739 {
2740         struct amdgpu_ras_block_object *ras_obj = NULL;
2741         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2742         struct ras_query_if *query_info;
2743         unsigned long ue_count, ce_count;
2744         int r;
2745
2746         /* disable RAS feature per IP block if it is not supported */
2747         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2748                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2749                 return 0;
2750         }
2751
2752         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2753         if (r) {
2754                 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2755                         /* in resume phase, if fail to enable ras,
2756                          * clean up all ras fs nodes, and disable ras */
2757                         goto cleanup;
2758                 } else
2759                         return r;
2760         }
2761
2762         /* check for errors on ASICs that support persistent edc over a warm reset */
2763         amdgpu_persistent_edc_harvesting(adev, ras_block);
2764
2765         /* in resume phase, no need to create ras fs node */
2766         if (adev->in_suspend || amdgpu_in_reset(adev))
2767                 return 0;
2768
2769         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2770         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2771             (ras_obj->hw_ops->query_poison_status ||
2772             ras_obj->hw_ops->handle_poison_consumption))) {
2773                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2774                 if (r)
2775                         goto cleanup;
2776         }
2777
2778         if (ras_obj->hw_ops &&
2779             (ras_obj->hw_ops->query_ras_error_count ||
2780              ras_obj->hw_ops->query_ras_error_status)) {
2781                 r = amdgpu_ras_sysfs_create(adev, ras_block);
2782                 if (r)
2783                         goto interrupt;
2784
2785                 /* Those are the cached values at init.
2786                  */
2787                 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
2788                 if (!query_info)
2789                         return -ENOMEM;
2790                 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2791
2792                 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2793                         atomic_set(&con->ras_ce_count, ce_count);
2794                         atomic_set(&con->ras_ue_count, ue_count);
2795                 }
2796
2797                 kfree(query_info);
2798         }
2799
2800         return 0;
2801
2802 interrupt:
2803         if (ras_obj->ras_cb)
2804                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2805 cleanup:
2806         amdgpu_ras_feature_enable(adev, ras_block, 0);
2807         return r;
2808 }
2809
2810 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2811                          struct ras_common_if *ras_block)
2812 {
2813         return amdgpu_ras_block_late_init(adev, ras_block);
2814 }
2815
2816 /* helper function to remove ras fs node and interrupt handler */
2817 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2818                           struct ras_common_if *ras_block)
2819 {
2820         struct amdgpu_ras_block_object *ras_obj;
2821         if (!ras_block)
2822                 return;
2823
2824         amdgpu_ras_sysfs_remove(adev, ras_block);
2825
2826         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2827         if (ras_obj->ras_cb)
2828                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2829 }
2830
2831 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2832                           struct ras_common_if *ras_block)
2833 {
2834         return amdgpu_ras_block_late_fini(adev, ras_block);
2835 }
2836
2837 /* do some init work after IP late init, which this depends on.
2838  * it runs in the resume/gpu reset/boot-up cases.
2839  */
2840 void amdgpu_ras_resume(struct amdgpu_device *adev)
2841 {
2842         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2843         struct ras_manager *obj, *tmp;
2844
2845         if (!adev->ras_enabled || !con) {
2846                 /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2847                 amdgpu_release_ras_context(adev);
2848
2849                 return;
2850         }
2851
2852         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2853                 /* Set up all other IPs which are not implemented. There is a
2854                  * tricky thing here: the IP's actual ras error type should be
2855                  * MULTI_UNCORRECTABLE, but since the driver does not handle it,
2856                  * ERROR_NONE makes sense anyway.
2857                  */
2858                 amdgpu_ras_enable_all_features(adev, 1);
2859
2860                 /* We enable ras on all hw_supported blocks, but the boot
2861                  * parameter might disable some of them, and one or more IPs
2862                  * may not be implemented yet. Disable those on their behalf.
2863                  */
2864                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2865                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2866                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2867                                 /* there should not be any references left. */
2868                                 WARN_ON(alive_obj(obj));
2869                         }
2870                 }
2871         }
2872 }
2873
2874 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2875 {
2876         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2877
2878         if (!adev->ras_enabled || !con)
2879                 return;
2880
2881         amdgpu_ras_disable_all_features(adev, 0);
2882         /* Make sure all ras objects are disabled. */
2883         if (con->features)
2884                 amdgpu_ras_disable_all_features(adev, 1);
2885 }
2886
2887 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2888 {
2889         struct amdgpu_ras_block_list *node, *tmp;
2890         struct amdgpu_ras_block_object *obj;
2891         int r;
2892
2893         /* Guest side doesn't need to init the ras feature */
2894         if (amdgpu_sriov_vf(adev))
2895                 return 0;
2896
2897         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2898                 if (!node->ras_obj) {
2899                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2900                         continue;
2901                 }
2902
2903                 obj = node->ras_obj;
2904                 if (obj->ras_late_init) {
2905                         r = obj->ras_late_init(adev, &obj->ras_comm);
2906                         if (r) {
2907                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2908                                         obj->ras_comm.name, r);
2909                                 return r;
2910                         }
2911                 } else
2912                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2913         }
2914
2915         return 0;
2916 }
2917
2918 /* Do some fini work before IP fini, as it depends on that */
2919 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2920 {
2921         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2922
2923         if (!adev->ras_enabled || !con)
2924                 return 0;
2925
2926
2927         /* Need to disable ras on all IPs here before ip [hw/sw]fini */
2928         if (con->features)
2929                 amdgpu_ras_disable_all_features(adev, 0);
2930         amdgpu_ras_recovery_fini(adev);
2931         return 0;
2932 }
2933
2934 int amdgpu_ras_fini(struct amdgpu_device *adev)
2935 {
2936         struct amdgpu_ras_block_list *ras_node, *tmp;
2937         struct amdgpu_ras_block_object *obj = NULL;
2938         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2939
2940         if (!adev->ras_enabled || !con)
2941                 return 0;
2942
2943         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2944                 if (ras_node->ras_obj) {
2945                         obj = ras_node->ras_obj;
2946                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2947                             obj->ras_fini)
2948                                 obj->ras_fini(adev, &obj->ras_comm);
2949                         else
2950                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2951                 }
2952
2953                 /* Clear ras blocks from ras_list and free ras block list node */
2954                 list_del(&ras_node->node);
2955                 kfree(ras_node);
2956         }
2957
2958         amdgpu_ras_fs_fini(adev);
2959         amdgpu_ras_interrupt_remove_all(adev);
2960
2961         WARN(con->features, "Feature mask is not cleared");
2962
2963         if (con->features)
2964                 amdgpu_ras_disable_all_features(adev, 1);
2965
2966         cancel_delayed_work_sync(&con->ras_counte_delay_work);
2967
2968         amdgpu_ras_set_context(adev, NULL);
2969         kfree(con);
2970
2971         return 0;
2972 }
2973
2974 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2975 {
2976         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2977                 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2978
2979                 dev_info(adev->dev, "uncorrectable hardware error "
2980                         "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2981
2982                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2983                 amdgpu_ras_reset_gpu(adev);
2984         }
2985 }
2986
2987 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2988 {
2989         if (adev->asic_type == CHIP_VEGA20 &&
2990             adev->pm.fw_version <= 0x283400) {
2991                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2992                                 amdgpu_ras_intr_triggered();
2993         }
2994
2995         return false;
2996 }
2997
2998 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2999 {
3000         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3001
3002         if (!con)
3003                 return;
3004
3005         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3006                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3007                 amdgpu_ras_set_context(adev, NULL);
3008                 kfree(con);
3009         }
3010 }
3011
3012 #ifdef CONFIG_X86_MCE_AMD
3013 static struct amdgpu_device *find_adev(uint32_t node_id)
3014 {
3015         int i;
3016         struct amdgpu_device *adev = NULL;
3017
3018         for (i = 0; i < mce_adev_list.num_gpu; i++) {
3019                 adev = mce_adev_list.devs[i];
3020
3021                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
3022                     adev->gmc.xgmi.physical_node_id == node_id)
3023                         break;
3024                 adev = NULL;
3025         }
3026
3027         return adev;
3028 }
3029
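/*
 * Field decode helpers for the MCA IPID value reported in struct
 * mce::ipid for GPU UMC banks. As encoded by the macros below, the
 * GPU id sits in bits [47:44], the UMC instance in bits [23:21], and
 * the channel index is assembled from bits [13:12] (low two bits)
 * plus bit 20 (third bit).
 */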
3030 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
3031 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
3032 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3033 #define GPU_ID_OFFSET           8
3034
3035 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3036                                     unsigned long val, void *data)
3037 {
3038         struct mce *m = (struct mce *)data;
3039         struct amdgpu_device *adev = NULL;
3040         uint32_t gpu_id = 0;
3041         uint32_t umc_inst = 0, ch_inst = 0;
3042
3043         /*
3044          * Only process the error if it was generated in UMC_V2, which
3045          * belongs to the GPU UMCs, and it occurred in DramECC
3046          * (Extended error code = 0); otherwise bail out.
3047          */
3048         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3049                     (XEC(m->status, 0x3f) == 0x0)))
3050                 return NOTIFY_DONE;
3051
3052         /*
3053          * If it is a correctable error, just return.
3054          */
3055         if (mce_is_correctable(m))
3056                 return NOTIFY_OK;
3057
3058         /*
3059          * The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
3060          */
3061         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3062
3063         adev = find_adev(gpu_id);
3064         if (!adev) {
3065                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3066                                                                 gpu_id);
3067                 return NOTIFY_DONE;
3068         }
3069
3070         /*
3071          * Since the error is uncorrectable, find out the UMC instance and
3072          * channel index.
3073          */
3074         umc_inst = GET_UMC_INST(m->ipid);
3075         ch_inst = GET_CHAN_INDEX(m->ipid);
3076
3077         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
3078                              umc_inst, ch_inst);
3079
3080         if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3081                 return NOTIFY_OK;
3082         else
3083                 return NOTIFY_DONE;
3084 }
3085
3086 static struct notifier_block amdgpu_bad_page_nb = {
3087         .notifier_call  = amdgpu_bad_page_notifier,
3088         .priority       = MCE_PRIO_UC,
3089 };
3090
3091 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3092 {
3093         /*
3094          * Add the adev to the mce_adev_list.
3095          * During mode2 reset, amdgpu device is temporarily
3096          * removed from the mgpu_info list which can cause
3097          * page retirement to fail.
3098          * Use this list instead of mgpu_info to find the amdgpu
3099          * device on which the UMC error was reported.
3100          */
3101         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3102
3103         /*
3104          * Register the x86 notifier with the MCE subsystem
3105          * only once.
3106          */
3107         if (!notifier_registered) {
3108                 mce_register_decode_chain(&amdgpu_bad_page_nb);
3109                 notifier_registered = true;
3110         }
3111 }
3112 #endif
3113
3114 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3115 {
3116         if (!adev)
3117                 return NULL;
3118
3119         return adev->psp.ras_context.ras;
3120 }
3121
3122 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3123 {
3124         if (!adev)
3125                 return -EINVAL;
3126
3127         adev->psp.ras_context.ras = ras_con;
3128         return 0;
3129 }
3130
3131 /* check if ras is supported on block, say, sdma, gfx */
3132 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3133                 unsigned int block)
3134 {
3135         int ret = 0;
3136         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3137
3138         if (block >= AMDGPU_RAS_BLOCK_COUNT)
3139                 return 0;
3140
3141         ret = ras && (adev->ras_enabled & (1 << block));
3142
3143         /* For the special asic with mem ecc enabled but sram ecc
3144          * not enabled, even if the ras block is not marked in
3145          * .ras_enabled, the block can still be considered to support
3146          * ras as long as the asic supports poison mode and the
3147          * ras block has a registered ras configuration.
3148          */
3149         if (!ret &&
3150             (block == AMDGPU_RAS_BLOCK__GFX ||
3151              block == AMDGPU_RAS_BLOCK__SDMA ||
3152              block == AMDGPU_RAS_BLOCK__VCN ||
3153              block == AMDGPU_RAS_BLOCK__JPEG) &&
3154             amdgpu_ras_is_poison_mode_supported(adev) &&
3155             amdgpu_ras_get_ras_block(adev, block, 0))
3156                 ret = 1;
3157
3158         return ret;
3159 }
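/*
 * Typical usage sketch: IP code gates its ras paths on this helper,
 * e.g. (example_setup_gfx_ras() is a hypothetical helper):
 *
 *	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 *		example_setup_gfx_ras(adev);
 */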
3160
3161 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3162 {
3163         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3164
3165         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3166                 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3167         return 0;
3168 }
3169
3170
3171 /* Register each ip ras block into amdgpu ras */
3172 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3173                 struct amdgpu_ras_block_object *ras_block_obj)
3174 {
3175         struct amdgpu_ras_block_list *ras_node;
3176         if (!adev || !ras_block_obj)
3177                 return -EINVAL;
3178
3179         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3180         if (!ras_node)
3181                 return -ENOMEM;
3182
3183         INIT_LIST_HEAD(&ras_node->node);
3184         ras_node->ras_obj = ras_block_obj;
3185         list_add_tail(&ras_node->node, &adev->ras_list);
3186
3187         return 0;
3188 }
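/*
 * Usage sketch: an IP block fills in an amdgpu_ras_block_object and
 * registers it (typically from its sw init) so that
 * amdgpu_ras_late_init() and amdgpu_ras_fini() can walk adev->ras_list
 * and invoke the per-block hooks. The object below is hypothetical;
 * only the structure, fields and helpers it uses exist in the driver.
 *
 *	static struct amdgpu_ras_block_object example_ras_block = {
 *		.ras_comm = {
 *			.name = "example",
 *			.block = AMDGPU_RAS_BLOCK__GFX,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *		},
 *		.ras_late_init = amdgpu_ras_block_late_init,
 *		.ras_fini = amdgpu_ras_block_late_fini,
 *	};
 *
 *	amdgpu_ras_register_ras_block(adev, &example_ras_block);
 */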
3189
3190 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
3191 {
3192         if (!err_type_name)
3193                 return;
3194
3195         switch (err_type) {
3196         case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
3197                 sprintf(err_type_name, "correctable");
3198                 break;
3199         case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
3200                 sprintf(err_type_name, "uncorrectable");
3201                 break;
3202         default:
3203                 sprintf(err_type_name, "unknown");
3204                 break;
3205         }
3206 }
3207
3208 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
3209                                          const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3210                                          uint32_t instance,
3211                                          uint32_t *memory_id)
3212 {
3213         uint32_t err_status_lo_data, err_status_lo_offset;
3214
3215         if (!reg_entry)
3216                 return false;
3217
3218         err_status_lo_offset =
3219                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3220                                             reg_entry->seg_lo, reg_entry->reg_lo);
3221         err_status_lo_data = RREG32(err_status_lo_offset);
3222
3223         if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
3224             !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
3225                 return false;
3226
3227         *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
3228
3229         return true;
3230 }
3231
3232 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
3233                                        const struct amdgpu_ras_err_status_reg_entry *reg_entry,
3234                                        uint32_t instance,
3235                                        unsigned long *err_cnt)
3236 {
3237         uint32_t err_status_hi_data, err_status_hi_offset;
3238
3239         if (!reg_entry)
3240                 return false;
3241
3242         err_status_hi_offset =
3243                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
3244                                             reg_entry->seg_hi, reg_entry->reg_hi);
3245         err_status_hi_data = RREG32(err_status_hi_offset);
3246
3247         if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
3248             !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
3249                 /* keep the check here in case we need to refer to the result later */
3250                 dev_dbg(adev->dev, "Invalid err_info field\n");
3251
3252         /* read err count */
3253         *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
3254
3255         return true;
3256 }
3257
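/*
 * Usage sketch for the two helpers below: an IP block typically keeps a
 * static table of amdgpu_ras_err_status_reg_entry entries (one per
 * err_status register pair, carrying the hwip/seg/reg offsets, flags and
 * a block_name that these functions read), plus optionally an
 * amdgpu_ras_memory_id_entry table that maps memory_id values to
 * printable names. The block's query hw op then calls
 * amdgpu_ras_inst_query_ras_error_count() once per instance, and its
 * reset hw op calls amdgpu_ras_inst_reset_ras_error_count() with the
 * same register list.
 */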
3258 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
3259                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
3260                                            uint32_t reg_list_size,
3261                                            const struct amdgpu_ras_memory_id_entry *mem_list,
3262                                            uint32_t mem_list_size,
3263                                            uint32_t instance,
3264                                            uint32_t err_type,
3265                                            unsigned long *err_count)
3266 {
3267         uint32_t memory_id;
3268         unsigned long err_cnt;
3269         char err_type_name[16];
3270         uint32_t i, j;
3271
3272         for (i = 0; i < reg_list_size; i++) {
3273                 /* query memory_id from err_status_lo */
3274                 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
3275                                                          instance, &memory_id))
3276                         continue;
3277
3278                 /* query err_cnt from err_status_hi */
3279                 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
3280                                                        instance, &err_cnt) ||
3281                     !err_cnt)
3282                         continue;
3283
3284                 *err_count += err_cnt;
3285
3286                 /* log the errors */
3287                 amdgpu_ras_get_error_type_name(err_type, err_type_name);
3288                 if (!mem_list) {
3289                         /* memory_list is not supported */
3290                         dev_info(adev->dev,
3291                                  "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
3292                                  err_cnt, err_type_name,
3293                                  reg_list[i].block_name,
3294                                  instance, memory_id);
3295                 } else {
3296                         for (j = 0; j < mem_list_size; j++) {
3297                                 if (memory_id == mem_list[j].memory_id) {
3298                                         dev_info(adev->dev,
3299                                                  "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
3300                                                  err_cnt, err_type_name,
3301                                                  reg_list[i].block_name,
3302                                                  instance, mem_list[j].name);
3303                                         break;
3304                                 }
3305                         }
3306                 }
3307         }
3308 }
3309
3310 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
3311                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
3312                                            uint32_t reg_list_size,
3313                                            uint32_t instance)
3314 {
3315         uint32_t err_status_lo_offset, err_status_hi_offset;
3316         uint32_t i;
3317
3318         for (i = 0; i < reg_list_size; i++) {
3319                 err_status_lo_offset =
3320                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3321                                                     reg_list[i].seg_lo, reg_list[i].reg_lo);
3322                 err_status_hi_offset =
3323                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
3324                                                     reg_list[i].seg_hi, reg_list[i].reg_hi);
3325                 WREG32(err_status_lo_offset, 0);
3326                 WREG32(err_status_hi_offset, 0);
3327         }
3328 }