/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"

static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)
/* typical ECC bad page rate (1 bad page per 100MB of VRAM) */
#define RAS_BAD_PAGE_RATE               (100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data = {0, 0, 0, NULL};
        struct eeprom_table_record err_rec;

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                dev_warn(adev->dev,
                         "RAS WARN: input address 0x%llx is invalid.\n",
                         address);
                return -EINVAL;
        }

        if (amdgpu_ras_check_bad_page(adev, address)) {
                dev_warn(adev->dev,
                         "RAS WARN: 0x%llx has already been marked as bad page!\n",
                         address);
                return 0;
        }

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));

        err_rec.address = address;
        err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
        err_rec.ts = (uint64_t)ktime_get_real_seconds();
        err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;

        err_data.err_addr = &err_rec;
        err_data.err_addr_cnt = 1;

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt);
                amdgpu_ras_save_bad_pages(adev);
        }

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);

        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_str(i)) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue and ce errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
 *
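 * A minimal userspace sketch of that path (a hypothetical example; it
 * assumes the struct ras_debug_if layout from amdgpu_ras.h is available
 * to the program, and omits error handling):
 *
 * .. code-block:: c
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      struct ras_debug_if data = {0};
 *      // fill in data.head (or data.inject) and data.op here
 *      int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *      write(fd, &data, sizeof(data));
 *      close(fd);
 *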
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where <N> is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, the leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
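 * The interface also accepts a plain "retire_page" command; it takes only an
 * address (hex or decimal) and directly marks that page as bad (handled by
 * amdgpu_reserve_page_direct() in this file):
 *
 * .. code-block:: bash
 *
 *      echo "retire_page <address>" > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *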
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
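 * For example, for the umc block:
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *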
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
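 * For example:
 *
 * .. code-block:: bash
 *
 *      cat /sys/module/amdgpu/parameters/ras_mask
 *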
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
                size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection "
                                "currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return -EINVAL;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                if ((data.inject.address >= adev->gmc.mc_vram_size) ||
                    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                        dev_warn(adev->dev, "RAS WARN: input address "
                                        "0x%llx is invalid.",
                                        data.inject.address);
                        ret = -EINVAL;
                        break;
                }

                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
                        dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
                                        "as bad before error injection!\n",
                                        data.inject.address);
                        break;
                }

                /* data.inject.address is an offset instead of an absolute gpu address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return -EINVAL;

        return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
                size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        ret = amdgpu_ras_eeprom_reset_table(
                        &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (ret == 1) {
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return -EIO;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (obj->adev->asic_type == CHIP_ALDEBARAN) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        DRM_WARN("Failed to reset error counter and error status");
        }

        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                          "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0))
                list_del(&obj->node);
        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        obj = &con->objs[head->block];
        /* already exists; do not create it again */
        if (alive_obj(obj))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                obj = &con->objs[head->block];

                if (alive_obj(obj)) {
                        WARN_ON(head->block != obj->head.block);
                        return obj;
                }
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj)) {
                                WARN_ON(i != obj->head.block);
                                return obj;
                        }
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
        /* If the hardware does not support ras, then do not create the obj.
         * But if the hardware does support ras, we can create the obj.
         * The RAS framework checks adev->ras_hw_enabled to see if it needs
         * to do the corresponding initialization.
         * IPs check adev->ras_enabled to see if they need to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;
        if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case we create obj somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret;

        if (!con)
                return -EINVAL;

        info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        if (!enable) {
                info->disable_features = (struct ta_ras_disable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        } else {
                info->enable_features = (struct ta_ras_enable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        }

        /* Do not enable if it is not allowed. */
        WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
        /* Are we already in the state we are going to set? */
        if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
                ret = 0;
                goto out;
        }

        if (!amdgpu_ras_intr_triggered()) {
                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed %d\n",
                                enable ? "enable":"disable",
                                ras_block_str(head->block),
                                ret);
                        goto out;
                }
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);
        ret = 0;
out:
        kfree(info);
        return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless
                         * of the current ras state.
                         * If the current state equals the target state, it
                         * will do nothing. But sometimes it requests the
                         * driver to reset and repost with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this workaround in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                ras_block_str(head->block));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd. */
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* the gfx block ras disable cmd must be sent to the ras TA */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp.
                 * aka just release the obj and corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
        int i;
        const enum amdgpu_ras_error_type default_ras_type =
                AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < ras_block_count; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };
                strcpy(head.name, ras_block_str(i));
                if (bypass) {
                        /*
                         * bypass psp. vbios has enabled ras for us,
                         * so just create the obj.
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
        struct ras_query_if *info)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};
        int i;

        if (!obj)
                return -EINVAL;

        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__UMC:
                if (adev->umc.ras_funcs &&
                    adev->umc.ras_funcs->query_ras_error_count)
                        adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras_funcs &&
                    adev->umc.ras_funcs->query_ras_error_address)
                        adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->query_ras_error_count) {
                        for (i = 0; i < adev->sdma.num_instances; i++)
                                adev->sdma.funcs->query_ras_error_count(adev, i,
                                                                        &err_data);
                }
                break;
        case AMDGPU_RAS_BLOCK__GFX:
                if (adev->gfx.ras_funcs &&
                    adev->gfx.ras_funcs->query_ras_error_count)
                        adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);

                if (adev->gfx.ras_funcs &&
                    adev->gfx.ras_funcs->query_ras_error_status)
                        adev->gfx.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
                if (adev->mmhub.ras_funcs &&
                    adev->mmhub.ras_funcs->query_ras_error_count)
                        adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);

                if (adev->mmhub.ras_funcs &&
                    adev->mmhub.ras_funcs->query_ras_error_status)
                        adev->mmhub.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                if (adev->nbio.ras_funcs &&
                    adev->nbio.ras_funcs->query_ras_error_count)
                        adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__XGMI_WAFL:
                if (adev->gmc.xgmi.ras_funcs &&
                    adev->gmc.xgmi.ras_funcs->query_ras_error_count)
                        adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__HDP:
                if (adev->hdp.ras_funcs &&
                    adev->hdp.ras_funcs->query_ras_error_count)
                        adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        default:
                break;
        }

        obj->err_data.ue_count += err_data.ue_count;
        obj->err_data.ce_count += err_data.ce_count;

        info->ue_count = obj->err_data.ue_count;
        info->ce_count = obj->err_data.ce_count;

        if (err_data.ce_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ce_count,
                                        ras_block_str(info->head.block));
                } else {
                        dev_info(adev->dev, "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        obj->err_data.ce_count,
                                        ras_block_str(info->head.block));
                }
        }
        if (err_data.ue_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ue_count,
                                        ras_block_str(info->head.block));
                } else {
                        dev_info(adev->dev, "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        obj->err_data.ue_count,
                                        ras_block_str(info->head.block));
                }
        }

        return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        if (!amdgpu_ras_is_supported(adev, block))
                return -EINVAL;

        switch (block) {
        case AMDGPU_RAS_BLOCK__GFX:
                if (adev->gfx.ras_funcs &&
                    adev->gfx.ras_funcs->reset_ras_error_count)
                        adev->gfx.ras_funcs->reset_ras_error_count(adev);

                if (adev->gfx.ras_funcs &&
                    adev->gfx.ras_funcs->reset_ras_error_status)
                        adev->gfx.ras_funcs->reset_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
                if (adev->mmhub.ras_funcs &&
                    adev->mmhub.ras_funcs->reset_ras_error_count)
                        adev->mmhub.ras_funcs->reset_ras_error_count(adev);

                if (adev->mmhub.ras_funcs &&
                    adev->mmhub.ras_funcs->reset_ras_error_status)
                        adev->mmhub.ras_funcs->reset_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->reset_ras_error_count)
                        adev->sdma.funcs->reset_ras_error_count(adev);
                break;
        case AMDGPU_RAS_BLOCK__HDP:
                if (adev->hdp.ras_funcs &&
                    adev->hdp.ras_funcs->reset_ras_error_count)
                        adev->hdp.ras_funcs->reset_ras_error_count(adev);
                break;
        default:
                break;
        }

        return 0;
}

/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
                                 struct ta_ras_trigger_error_input *block_info)
{
        int ret;

        if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
                dev_warn(adev->dev, "Failed to disallow df cstate");

        if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
                dev_warn(adev->dev, "Failed to disallow XGMI power down");

        ret = psp_ras_trigger_error(&adev->psp, block_info);

        if (amdgpu_ras_intr_triggered())
                return ret;

        if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
                dev_warn(adev->dev, "Failed to allow XGMI power down");

        if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
                dev_warn(adev->dev, "Failed to allow df cstate");

        return ret;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ta_ras_trigger_error_input block_info = {
                .block_id =  amdgpu_ras_block_to_ta(info->head.block),
                .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
                .sub_block_index = info->head.sub_block_index,
                .address = info->address,
                .value = info->value,
        };
        int ret = 0;

        if (!obj)
                return -EINVAL;

        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                block_info.address =
                        amdgpu_xgmi_get_relative_phy_addr(adev,
                                                          block_info.address);
        }

        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
                if (adev->gfx.ras_funcs &&
                    adev->gfx.ras_funcs->ras_error_inject)
                        ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
                else
                        ret = -EINVAL;
                break;
        case AMDGPU_RAS_BLOCK__UMC:
        case AMDGPU_RAS_BLOCK__SDMA:
        case AMDGPU_RAS_BLOCK__MMHUB:
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                ret = psp_ras_trigger_error(&adev->psp, &block_info);
                break;
        case AMDGPU_RAS_BLOCK__XGMI_WAFL:
                ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
                break;
        default:
                dev_info(adev->dev, "%s error injection is not supported yet\n",
                         ras_block_str(info->head.block));
                ret = -EINVAL;
        }

        if (ret)
                dev_err(adev->dev, "ras inject %s failed %d\n",
                        ras_block_str(info->head.block), ret);

        return ret;
}

/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                bool is_ce)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        struct ras_err_data data = {0, 0};

        if (!adev->ras_enabled || !con)
                return 0;

        list_for_each_entry(obj, &con->head, node) {
                struct ras_query_if info = {
                        .head = obj->head,
                };

                if (amdgpu_ras_query_error_status(adev, &info))
                        return 0;

                data.ce_count += info.ce_count;
                data.ue_count += info.ue_count;
        }

        return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
        switch (flags) {
        case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
                return "R";
        case AMDGPU_RAS_RETIRE_PAGE_PENDING:
                return "P";
        case AMDGPU_RAS_RETIRE_PAGE_FAULT:
        default:
                return "F";
        }
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not available for use.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve, this gpu page can't be reserved.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *      0x00000001 : 0x00001000 : R
 *      0x00000002 : 0x00001000 : P
 *
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
                struct kobject *kobj, struct bin_attribute *attr,
                char *buf, loff_t ppos, size_t count)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, badpages_attr);
        struct amdgpu_device *adev = con->adev;
        const unsigned int element_size =
                sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
        unsigned int start = div64_ul(ppos + element_size - 1, element_size);
        unsigned int end = div64_ul(ppos + count - 1, element_size);
        ssize_t s = 0;
        struct ras_badpage *bps = NULL;
        unsigned int bps_count = 0;

        memset(buf, 0, count);

        if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
                return 0;

        for (; start < end && start < bps_count; start++)
                s += scnprintf(&buf[s], element_size + 1,
                                "0x%08x : 0x%08x : %1s\n",
                                bps[start].bp,
                                bps[start].size,
                                amdgpu_ras_badpage_flags_str(bps[start].flags));

        kfree(bps);

        return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, features_attr);

        return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        sysfs_remove_file_from_group(&adev->dev->kobj,
                                &con->badpages_attr.attr,
                                RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct attribute *attrs[] = {
                &con->features_attr.attr,
                NULL
        };
        struct attribute_group group = {
                .name = RAS_FS_NAME,
                .attrs = attrs,
        };

        sysfs_remove_group(&adev->dev->kobj, &group);

        return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
                struct ras_fs_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

        if (!obj || obj->attr_inuse)
                return -EINVAL;

        get_obj(obj);

        memcpy(obj->fs_data.sysfs_name,
                        head->sysfs_name,
                        sizeof(obj->fs_data.sysfs_name));

        obj->sysfs_attr = (struct device_attribute){
                .attr = {
                        .name = obj->fs_data.sysfs_name,
                        .mode = S_IRUGO,
                },
                        .show = amdgpu_ras_sysfs_read,
        };
        sysfs_attr_init(&obj->sysfs_attr.attr);

        if (sysfs_add_file_to_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME)) {
                put_obj(obj);
                return -EINVAL;
        }

        obj->attr_inuse = 1;

        return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        if (!obj || !obj->attr_inuse)
                return -EINVAL;

        sysfs_remove_file_from_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME);
        obj->attr_inuse = 0;
        put_obj(obj);

        return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                amdgpu_ras_sysfs_remove(adev, &obj->head);
        }

        if (amdgpu_bad_page_threshold != 0)
                amdgpu_ras_sysfs_remove_bad_page_node(adev);

        amdgpu_ras_sysfs_remove_feature_node(adev);

        return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct drm_minor  *minor = adev_to_drm(adev)->primary;
        struct dentry     *dir;

        dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
        debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
                            &amdgpu_ras_debugfs_ctrl_ops);
        debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
                            &amdgpu_ras_debugfs_eeprom_ops);
        debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
                           &con->bad_page_cnt_threshold);
        debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
        debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);

        /*
         * After one uncorrectable error happens, usually GPU recovery will
         * be scheduled. But because of the known problem that GPU recovery
         * can fail to bring the GPU back, the interface below provides a
         * direct way for the user to reboot the system automatically when
         * an ERREVENT_ATHUB_INTERRUPT is generated; in that case the normal
         * GPU recovery routine will never be called.
         */
        debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

        /*
         * The user can set this to skip cleaning up the hardware error count
         * registers of the RAS IPs during ras recovery.
         */
        debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
                            &con->disable_ras_err_cnt_harvest);
        return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
                                      struct ras_fs_if *head,
                                      struct dentry *dir)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

        if (!obj || !dir)
                return;

        get_obj(obj);

        memcpy(obj->fs_data.debugfs_name,
                        head->debugfs_name,
                        sizeof(obj->fs_data.debugfs_name));

        debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
                            obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct dentry *dir;
        struct ras_manager *obj;
        struct ras_fs_if fs_info;

        /*
         * it won't be called in the resume path, so no need to check
         * suspend and gpu reset status
         */
        if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
                return;

        dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

        list_for_each_entry(obj, &con->head, node) {
                if (amdgpu_ras_is_supported(adev, obj->head.block) &&
                        (obj->attr_inuse == 1)) {
                        sprintf(fs_info.debugfs_name, "%s_err_inject",
                                        ras_block_str(obj->head.block));
                        fs_info.head = obj->head;
                        amdgpu_ras_debugfs_create(adev, &fs_info, dir);
                }
        }
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
                amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
                amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct attribute_group group = {
                .name = RAS_FS_NAME,
        };
        struct attribute *attrs[] = {
                &con->features_attr.attr,
                NULL
        };
        struct bin_attribute *bin_attrs[] = {
                NULL,
                NULL,
        };
        int r;

        /* add features entry */
        con->features_attr = dev_attr_features;
        group.attrs = attrs;
        sysfs_attr_init(attrs[0]);

        if (amdgpu_bad_page_threshold != 0) {
                /* add bad_page_features entry */
                bin_attr_gpu_vram_bad_pages.private = NULL;
                con->badpages_attr = bin_attr_gpu_vram_bad_pages;
                bin_attrs[0] = &con->badpages_attr;
                group.bin_attrs = bin_attrs;
                sysfs_bin_attr_init(bin_attrs[0]);
        }

        r = sysfs_create_group(&adev->dev->kobj, &group);
        if (r)
                dev_err(adev->dev, "Failed to create RAS sysfs group!");

        return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *con_obj, *ip_obj, *tmp;

        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
                list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
                        ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
                        if (ip_obj)
                                put_obj(ip_obj);
                }
        }

        amdgpu_ras_sysfs_remove_all(adev);
        return 0;
}
/* ras fs end */

/* ih begin */
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
        struct ras_ih_data *data = &obj->ih_data;
        struct amdgpu_iv_entry entry;
        int ret;
        struct ras_err_data err_data = {0, 0, 0, NULL};

        while (data->rptr != data->wptr) {
                rmb();
                memcpy(&entry, &data->ring[data->rptr],
                                data->element_size);

                wmb();
                data->rptr = (data->aligned_element_size +
                                data->rptr) % data->ring_size;

                /* Let the IP handle its data; maybe we need to get the
                 * output from the callback to update the error type/count,
                 * etc.
                 */
                if (data->cb) {
                        ret = data->cb(obj->adev, &err_data, &entry);
                        /* A ue will trigger an interrupt, and in that case
                         * we need to do a reset to recover the whole system.
                         * But leave the IP to do that recovery; here we just
                         * dispatch the error.
                         */
                        if (ret == AMDGPU_RAS_SUCCESS) {
                                /* these counts could be left as 0 if
                                 * some blocks do not count the number of
                                 * errors
                                 */
                                obj->err_data.ue_count += err_data.ue_count;
                                obj->err_data.ce_count += err_data.ce_count;
                        }
                }
        }
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
        struct ras_ih_data *data =
                container_of(work, struct ras_ih_data, ih_work);
        struct ras_manager *obj =
                container_of(data, struct ras_manager, ih_data);

        amdgpu_ras_interrupt_handler(obj);
}

1468 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1469                 struct ras_dispatch_if *info)
1470 {
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_ih_data *data;

        if (!obj)
                return -EINVAL;

        data = &obj->ih_data;
        if (data->inuse == 0)
                return 0;
1479
        /* Note: the ring may overflow and overwrite unread entries. */
1481         memcpy(&data->ring[data->wptr], info->entry,
1482                         data->element_size);
1483
1484         wmb();
1485         data->wptr = (data->aligned_element_size +
1486                         data->wptr) % data->ring_size;
1487
1488         schedule_work(&data->ih_work);
1489
1490         return 0;
1491 }
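
/*
 * Illustrative sketch (not part of the driver): an IP block's top-half
 * interrupt handler typically forwards the IV entry into the ring above,
 * assuming it holds a ras_common_if (ras_if here) describing the block:
 *
 *	struct ras_dispatch_if ih_data = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 *
 * The heavy lifting (decoding and counting the error) then happens in
 * process context when the scheduled ih_work runs.
 */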
1492
1493 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1494                 struct ras_ih_if *info)
1495 {
1496         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1497         struct ras_ih_data *data;
1498
1499         if (!obj)
1500                 return -EINVAL;
1501
1502         data = &obj->ih_data;
1503         if (data->inuse == 0)
1504                 return 0;
1505
1506         cancel_work_sync(&data->ih_work);
1507
1508         kfree(data->ring);
1509         memset(data, 0, sizeof(*data));
1510         put_obj(obj);
1511
1512         return 0;
1513 }
1514
1515 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1516                 struct ras_ih_if *info)
1517 {
1518         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1519         struct ras_ih_data *data;
1520
1521         if (!obj) {
                /* in case we register the IH before enabling the RAS feature */
1523                 obj = amdgpu_ras_create_obj(adev, &info->head);
1524                 if (!obj)
1525                         return -EINVAL;
        } else {
                get_obj(obj);
        }
1528
1529         data = &obj->ih_data;
        /* set up the callback, element sizes, etc. */
1531         *data = (struct ras_ih_data) {
1532                 .inuse = 0,
1533                 .cb = info->cb,
1534                 .element_size = sizeof(struct amdgpu_iv_entry),
1535                 .rptr = 0,
1536                 .wptr = 0,
1537         };
1538
1539         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1540
1541         data->aligned_element_size = ALIGN(data->element_size, 8);
1542         /* the ring can store 64 iv entries. */
1543         data->ring_size = 64 * data->aligned_element_size;
1544         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1545         if (!data->ring) {
1546                 put_obj(obj);
1547                 return -ENOMEM;
1548         }
1549
1550         /* IH is ready */
1551         data->inuse = 1;
1552
1553         return 0;
1554 }
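
/*
 * Illustrative sketch (not part of the driver): registering a handler for
 * a block, assuming a hypothetical my_ip_process_ras_data_cb() callback
 * that fills a ras_err_data from the IV entry:
 *
 *	struct ras_ih_if ih_info = {
 *		.head = *ras_if,
 *		.cb = my_ip_process_ras_data_cb,
 *	};
 *
 *	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
 *
 * The matching amdgpu_ras_interrupt_remove_handler() call takes the same
 * ras_ih_if and frees the ring allocated here.
 */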
1555
1556 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1557 {
1558         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1559         struct ras_manager *obj, *tmp;
1560
1561         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1562                 struct ras_ih_if info = {
1563                         .head = obj->head,
1564                 };
1565                 amdgpu_ras_interrupt_remove_handler(adev, &info);
1566         }
1567
1568         return 0;
1569 }
1570 /* ih end */
1571
/* Traverse all IPs except NBIO to query error counters */
1573 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1574 {
1575         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1576         struct ras_manager *obj;
1577
1578         if (!adev->ras_enabled || !con)
1579                 return;
1580
1581         list_for_each_entry(obj, &con->head, node) {
1582                 struct ras_query_if info = {
1583                         .head = obj->head,
1584                 };
1585
                /*
                 * The PCIE_BIF IP has a separate ISR driven by the RAS
                 * controller interrupt, and the block-specific RAS counter
                 * query is done in that ISR. So skip this block in the
                 * common sync-flood interrupt ISR path.
                 */
1592                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1593                         continue;
1594
1595                 amdgpu_ras_query_error_status(adev, &info);
1596         }
1597 }
1598
1599 /* Parse RdRspStatus and WrRspStatus */
1600 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1601                                           struct ras_query_if *info)
1602 {
        /*
         * Currently only two blocks need to query the
         * read/write RspStatus
         */
1607         switch (info->head.block) {
1608         case AMDGPU_RAS_BLOCK__GFX:
1609                 if (adev->gfx.ras_funcs &&
1610                     adev->gfx.ras_funcs->query_ras_error_status)
1611                         adev->gfx.ras_funcs->query_ras_error_status(adev);
1612                 break;
1613         case AMDGPU_RAS_BLOCK__MMHUB:
1614                 if (adev->mmhub.ras_funcs &&
1615                     adev->mmhub.ras_funcs->query_ras_error_status)
1616                         adev->mmhub.ras_funcs->query_ras_error_status(adev);
1617                 break;
1618         default:
1619                 break;
1620         }
1621 }
1622
1623 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1624 {
1625         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1626         struct ras_manager *obj;
1627
1628         if (!adev->ras_enabled || !con)
1629                 return;
1630
1631         list_for_each_entry(obj, &con->head, node) {
1632                 struct ras_query_if info = {
1633                         .head = obj->head,
1634                 };
1635
1636                 amdgpu_ras_error_status_query(adev, &info);
1637         }
1638 }
1639
1640 /* recovery begin */
1641
/* Return 0 on success.
 * The caller must free bps.
 */
1645 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1646                 struct ras_badpage **bps, unsigned int *count)
1647 {
1648         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1649         struct ras_err_handler_data *data;
1650         int i = 0;
1651         int ret = 0, status;
1652
1653         if (!con || !con->eh_data || !bps || !count)
1654                 return -EINVAL;
1655
1656         mutex_lock(&con->recovery_lock);
1657         data = con->eh_data;
1658         if (!data || data->count == 0) {
1659                 *bps = NULL;
1660                 ret = -EINVAL;
1661                 goto out;
1662         }
1663
        *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
1665         if (!*bps) {
1666                 ret = -ENOMEM;
1667                 goto out;
1668         }
1669
1670         for (; i < data->count; i++) {
1671                 (*bps)[i] = (struct ras_badpage){
1672                         .bp = data->bps[i].retired_page,
1673                         .size = AMDGPU_GPU_PAGE_SIZE,
1674                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1675                 };
1676                 status = amdgpu_vram_mgr_query_page_status(
1677                                 ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1678                                 data->bps[i].retired_page);
1679                 if (status == -EBUSY)
1680                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1681                 else if (status == -ENOENT)
1682                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1683         }
1684
1685         *count = data->count;
1686 out:
1687         mutex_unlock(&con->recovery_lock);
1688         return ret;
1689 }
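
/*
 * Illustrative sketch (not part of the driver): a caller of
 * amdgpu_ras_badpages_read() owns the returned array and must free it:
 *
 *	struct ras_badpage *bps = NULL;
 *	unsigned int count = 0;
 *	unsigned int i;
 *
 *	if (!amdgpu_ras_badpages_read(adev, &bps, &count)) {
 *		for (i = 0; i < count; i++)
 *			pr_debug("bad page 0x%x, flags %u\n",
 *				 bps[i].bp, bps[i].flags);
 *		kfree(bps);
 *	}
 */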
1690
1691 static void amdgpu_ras_do_recovery(struct work_struct *work)
1692 {
1693         struct amdgpu_ras *ras =
1694                 container_of(work, struct amdgpu_ras, recovery_work);
1695         struct amdgpu_device *remote_adev = NULL;
1696         struct amdgpu_device *adev = ras->adev;
1697         struct list_head device_list, *device_list_handle =  NULL;
1698
1699         if (!ras->disable_ras_err_cnt_harvest) {
1700                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1701
1702                 /* Build list of devices to query RAS related errors */
                if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1704                         device_list_handle = &hive->device_list;
1705                 } else {
1706                         INIT_LIST_HEAD(&device_list);
1707                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
1708                         device_list_handle = &device_list;
1709                 }
1710
1711                 list_for_each_entry(remote_adev,
1712                                 device_list_handle, gmc.xgmi.head) {
1713                         amdgpu_ras_query_err_status(remote_adev);
1714                         amdgpu_ras_log_on_err_counter(remote_adev);
1715                 }
1716
1717                 amdgpu_put_xgmi_hive(hive);
1718         }
1719
1720         if (amdgpu_device_should_recover_gpu(ras->adev))
1721                 amdgpu_device_gpu_recover(ras->adev, NULL);
1722         atomic_set(&ras->in_recovery, 0);
1723 }
1724
1725 /* alloc/realloc bps array */
1726 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1727                 struct ras_err_handler_data *data, int pages)
1728 {
1729         unsigned int old_space = data->count + data->space_left;
1730         unsigned int new_space = old_space + pages;
1731         unsigned int align_space = ALIGN(new_space, 512);
1732         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1733
        if (!bps)
                return -ENOMEM;
1738
1739         if (data->bps) {
1740                 memcpy(bps, data->bps,
1741                                 data->count * sizeof(*data->bps));
1742                 kfree(data->bps);
1743         }
1744
1745         data->bps = bps;
1746         data->space_left += align_space - old_space;
1747         return 0;
1748 }
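
/*
 * Worked example of the growth above, with made-up numbers: if
 * data->count = 300 and data->space_left = 0, a request for pages = 256
 * gives old_space = 300, new_space = 556 and align_space =
 * ALIGN(556, 512) = 1024. After the copy, space_left becomes
 * 1024 - 300 = 724, so capacity always grows in 512-entry steps and
 * repeated single-page retirements do not reallocate every time.
 */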
1749
/* This deals with VRAM only. */
1751 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1752                 struct eeprom_table_record *bps, int pages)
1753 {
1754         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1755         struct ras_err_handler_data *data;
1756         int ret = 0;
1757         uint32_t i;
1758
1759         if (!con || !con->eh_data || !bps || pages <= 0)
1760                 return 0;
1761
1762         mutex_lock(&con->recovery_lock);
1763         data = con->eh_data;
1764         if (!data)
1765                 goto out;
1766
1767         for (i = 0; i < pages; i++) {
1768                 if (amdgpu_ras_check_bad_page_unlock(con,
1769                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
1770                         continue;
1771
1772                 if (!data->space_left &&
1773                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
1774                         ret = -ENOMEM;
1775                         goto out;
1776                 }
1777
1778                 amdgpu_vram_mgr_reserve_range(
1779                         ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1780                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
1781                         AMDGPU_GPU_PAGE_SIZE);
1782
1783                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
1784                 data->count++;
1785                 data->space_left--;
1786         }
1787 out:
1788         mutex_unlock(&con->recovery_lock);
1789
1790         return ret;
1791 }
1792
/*
 * Write the error record array to EEPROM; the caller should
 * hold recovery_lock while calling this function.
 */
1797 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
1798 {
1799         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1800         struct ras_err_handler_data *data;
1801         struct amdgpu_ras_eeprom_control *control;
1802         int save_count;
1803
1804         if (!con || !con->eh_data)
1805                 return 0;
1806
1807         control = &con->eeprom_control;
1808         data = con->eh_data;
1809         save_count = data->count - control->num_recs;
1810         /* only new entries are saved */
1811         if (save_count > 0) {
1812                 if (amdgpu_ras_eeprom_process_recods(control,
1813                                                         &data->bps[control->num_recs],
1814                                                         true,
1815                                                         save_count)) {
                        dev_err(adev->dev, "Failed to save EEPROM table data!\n");
1817                         return -EIO;
1818                 }
1819
1820                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
1821         }
1822
1823         return 0;
1824 }
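
/*
 * Illustrative sketch (not part of the driver): a typical retirement flow
 * pairs the two helpers above. Per the comment on
 * amdgpu_ras_save_bad_pages(), the save step is expected to run under
 * recovery_lock; err_rec/num_pages below are placeholders for records
 * produced by the UMC error-address conversion:
 *
 *	amdgpu_ras_add_bad_pages(adev, err_rec, num_pages);
 *
 *	mutex_lock(&con->recovery_lock);
 *	amdgpu_ras_save_bad_pages(adev);
 *	mutex_unlock(&con->recovery_lock);
 */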
1825
/*
 * Read the error record array from EEPROM and reserve enough space
 * for storing new bad pages.
 */
1830 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
1831 {
1832         struct amdgpu_ras_eeprom_control *control =
1833                                         &adev->psp.ras.ras->eeprom_control;
1834         struct eeprom_table_record *bps = NULL;
1835         int ret = 0;
1836
1837         /* no bad page record, skip eeprom access */
1838         if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
1839                 return ret;
1840
1841         bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
1842         if (!bps)
1843                 return -ENOMEM;
1844
1845         if (amdgpu_ras_eeprom_process_recods(control, bps, false,
1846                 control->num_recs)) {
                dev_err(adev->dev, "Failed to load EEPROM table records!\n");
1848                 ret = -EIO;
1849                 goto out;
1850         }
1851
1852         ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
1853
1854 out:
1855         kfree(bps);
1856         return ret;
1857 }
1858
1859 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
1860                                 uint64_t addr)
1861 {
1862         struct ras_err_handler_data *data = con->eh_data;
1863         int i;
1864
1865         addr >>= AMDGPU_GPU_PAGE_SHIFT;
1866         for (i = 0; i < data->count; i++)
1867                 if (addr == data->bps[i].retired_page)
1868                         return true;
1869
1870         return false;
1871 }
1872
/*
 * Check whether an address belongs to a bad page.
 *
 * Note: this check is only for the UMC block.
 */
1878 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
1879                                 uint64_t addr)
1880 {
1881         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1882         bool ret = false;
1883
1884         if (!con || !con->eh_data)
1885                 return ret;
1886
1887         mutex_lock(&con->recovery_lock);
1888         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
1889         mutex_unlock(&con->recovery_lock);
1890         return ret;
1891 }
1892
1893 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
1894                                         uint32_t max_length)
1895 {
1896         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1897         int tmp_threshold = amdgpu_bad_page_threshold;
1898         u64 val;
1899
        /*
         * Justification of the bad_page_cnt_threshold value in the RAS
         * structure.
         *
         * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
         * in EEPROM, which introduces two scenarios:
         *
         * Bad page retirement enabled:
         *    - If amdgpu_bad_page_threshold = -1, then
         *      bad_page_cnt_threshold = typical value by formula.
         *
         *    - If the value from the user satisfies
         *      0 < amdgpu_bad_page_threshold < max record length in
         *      EEPROM, use it directly.
         *
         * Bad page retirement disabled:
         *    - If amdgpu_bad_page_threshold = 0, bad page retirement
         *      is disabled and bad_page_cnt_threshold has no effect.
         */
1918
1919         if (tmp_threshold < -1)
1920                 tmp_threshold = -1;
1921         else if (tmp_threshold > max_length)
1922                 tmp_threshold = max_length;
1923
1924         if (tmp_threshold == -1) {
1925                 val = adev->gmc.mc_vram_size;
1926                 do_div(val, RAS_BAD_PAGE_RATE);
1927                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
1928                                                 max_length);
1929         } else {
1930                 con->bad_page_cnt_threshold = tmp_threshold;
1931         }
1932 }
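
/*
 * Worked example for the default (-1) case above, purely as an
 * illustration of the formula: with 32 GiB of VRAM,
 * val = 34359738368 / RAS_BAD_PAGE_RATE = 327 (truncated), so
 * bad_page_cnt_threshold = min(327, max_length).
 */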
1933
1934 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
1935 {
1936         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1937         struct ras_err_handler_data **data;
1938         uint32_t max_eeprom_records_len = 0;
1939         bool exc_err_limit = false;
1940         int ret;
1941
1942         if (adev->ras_enabled && con)
1943                 data = &con->eh_data;
1944         else
1945                 return 0;
1946
        *data = kzalloc(sizeof(**data), GFP_KERNEL);
1948         if (!*data) {
1949                 ret = -ENOMEM;
1950                 goto out;
1951         }
1952
1953         mutex_init(&con->recovery_lock);
1954         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
1955         atomic_set(&con->in_recovery, 0);
1956         con->adev = adev;
1957
1958         max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
1959         amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
1960
        /* Todo: during testing, the SMU might fail to read the EEPROM
         * through I2C when the GPU is pending an XGMI reset at probe time
         * (mostly after the second bus reset); skip it for now
         */
1965         if (adev->gmc.xgmi.pending_reset)
1966                 return 0;
1967         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
        /*
         * Bail out when exc_err_limit is true or
         * when ret != 0.
         */
1972         if (exc_err_limit || ret)
1973                 goto free;
1974
1975         if (con->eeprom_control.num_recs) {
1976                 ret = amdgpu_ras_load_bad_pages(adev);
1977                 if (ret)
1978                         goto free;
1979         }
1980
1981         return 0;
1982
1983 free:
1984         kfree((*data)->bps);
1985         kfree(*data);
1986         con->eh_data = NULL;
1987 out:
1988         dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
1989
        /*
         * Except for the error-threshold-exceeded case, other failures in
         * this function do not fail amdgpu driver init.
         */
1994         if (!exc_err_limit)
1995                 ret = 0;
1996         else
1997                 ret = -EINVAL;
1998
1999         return ret;
2000 }
2001
2002 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2003 {
2004         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2005         struct ras_err_handler_data *data = con->eh_data;
2006
        /* if recovery_init failed to initialize it, fini has nothing to do */
2008         if (!data)
2009                 return 0;
2010
2011         cancel_work_sync(&con->recovery_work);
2012
2013         mutex_lock(&con->recovery_lock);
2014         con->eh_data = NULL;
2015         kfree(data->bps);
2016         kfree(data);
2017         mutex_unlock(&con->recovery_lock);
2018
2019         return 0;
2020 }
2021 /* recovery end */
2022
/* Return 0 if RAS will reset the GPU and repost. */
2024 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
2025                 unsigned int block)
2026 {
2027         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2028
2029         if (!ras)
2030                 return -EINVAL;
2031
2032         ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
2033         return 0;
2034 }
2035
2036 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2037 {
2038         return adev->asic_type == CHIP_VEGA10 ||
2039                 adev->asic_type == CHIP_VEGA20 ||
2040                 adev->asic_type == CHIP_ARCTURUS ||
2041                 adev->asic_type == CHIP_ALDEBARAN ||
2042                 adev->asic_type == CHIP_SIENNA_CICHLID;
2043 }
2044
/*
 * This is a workaround for the VEGA20 workstation SKU:
 * force-enable GFX RAS and ignore the vbios GFX RAS flag,
 * because GC EDC cannot be written there.
 */
2050 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2051 {
2052         struct atom_context *ctx = adev->mode_info.atom_context;
2053
2054         if (!ctx)
2055                 return;
2056
2057         if (strnstr(ctx->vbios_version, "D16406",
2058                     sizeof(ctx->vbios_version)))
2059                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2060 }
2061
/*
 * Check the hardware's RAS ability, which is saved in hw_supported.
 * If the hardware does not support RAS, we can skip some RAS initialization
 * and forbid some RAS operations from the IPs.
 * If software (say, a boot parameter) limits the RAS ability, we still need
 * to allow the IPs some limited operations, like disable. In that case we
 * have to initialize RAS as normal, but check in each function whether the
 * operation is allowed.
 */
2071 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2072 {
2073         adev->ras_hw_enabled = adev->ras_enabled = 0;
2074
2075         if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
2076             !amdgpu_ras_asic_supported(adev))
2077                 return;
2078
2079         if (!adev->gmc.xgmi.connected_to_cpu) {
2080                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2081                         dev_info(adev->dev, "MEM ECC is active.\n");
2082                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2083                                                    1 << AMDGPU_RAS_BLOCK__DF);
2084                 } else {
                        dev_info(adev->dev, "MEM ECC is not present.\n");
2086                 }
2087
2088                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2089                         dev_info(adev->dev, "SRAM ECC is active.\n");
2090                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2091                                                     1 << AMDGPU_RAS_BLOCK__DF);
2092                 } else {
                        dev_info(adev->dev, "SRAM ECC is not present.\n");
2094                 }
2095         } else {
                /* The driver only manages the RAS features of a few IP
                 * blocks when the GPU is connected to the CPU through XGMI */
2098                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2099                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2100                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2101         }
2102
2103         amdgpu_ras_get_quirks(adev);
2104
2105         /* hw_supported needs to be aligned with RAS block mask. */
2106         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2107
2108         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2109                 adev->ras_hw_enabled & amdgpu_ras_mask;
2110 }
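
/*
 * A sketch of the masking at the end of amdgpu_ras_check_supported(),
 * with made-up numbers: if the vbios reports both MEM and SRAM ECC,
 * ras_hw_enabled covers every block in AMDGPU_RAS_BLOCK_MASK. Booting
 * with amdgpu_ras_mask=0x1 (UMC only, assuming AMDGPU_RAS_BLOCK__UMC is
 * bit 0) then yields
 *
 *	adev->ras_enabled = adev->ras_hw_enabled & 0x1;
 *
 * while amdgpu_ras_enable=0 forces ras_enabled to 0 regardless of the
 * hardware capability.
 */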
2111
2112 int amdgpu_ras_init(struct amdgpu_device *adev)
2113 {
2114         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2115         int r;
2116
2117         if (con)
2118                 return 0;
2119
        con = kzalloc(sizeof(struct amdgpu_ras) +
                        sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
                        GFP_KERNEL);
2123         if (!con)
2124                 return -ENOMEM;
2125
2126         con->objs = (struct ras_manager *)(con + 1);
2127
2128         amdgpu_ras_set_context(adev, con);
2129
2130         amdgpu_ras_check_supported(adev);
2131
2132         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
                /* Set the GFX block RAS context feature for VEGA20 Gaming,
                 * so that a RAS disable cmd is sent to the RAS TA during
                 * RAS late init.
                 */
2136                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2137                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2138
2139                         return 0;
2140                 }
2141
2142                 r = 0;
2143                 goto release_con;
2144         }
2145
2146         con->features = 0;
2147         INIT_LIST_HEAD(&con->head);
        /* Might need to get this flag from the vbios. */
2149         con->flags = RAS_DEFAULT_FLAGS;
2150
        /* Initialize the NBIO RAS functions ahead of any other RAS
         * functions so that the hardware fatal-error interrupt can be
         * enabled as early as possible */
2154         switch (adev->asic_type) {
2155         case CHIP_VEGA20:
2156         case CHIP_ARCTURUS:
2157         case CHIP_ALDEBARAN:
2158                 if (!adev->gmc.xgmi.connected_to_cpu)
2159                         adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
2160                 break;
2161         default:
2162                 /* nbio ras is not available */
2163                 break;
2164         }
2165
2166         if (adev->nbio.ras_funcs &&
2167             adev->nbio.ras_funcs->init_ras_controller_interrupt) {
2168                 r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
2169                 if (r)
2170                         goto release_con;
2171         }
2172
2173         if (adev->nbio.ras_funcs &&
2174             adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
2175                 r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
2176                 if (r)
2177                         goto release_con;
2178         }
2179
2180         if (amdgpu_ras_fs_init(adev)) {
2181                 r = -EINVAL;
2182                 goto release_con;
2183         }
2184
2185         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2186                  "hardware ability[%x] ras_mask[%x]\n",
2187                  adev->ras_hw_enabled, adev->ras_enabled);
2188
2189         return 0;
2190 release_con:
2191         amdgpu_ras_set_context(adev, NULL);
2192         kfree(con);
2193
2194         return r;
2195 }
2196
2197 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2198 {
2199         if (adev->gmc.xgmi.connected_to_cpu)
2200                 return 1;
2201         return 0;
2202 }
2203
2204 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2205                                         struct ras_common_if *ras_block)
2206 {
2207         struct ras_query_if info = {
2208                 .head = *ras_block,
2209         };
2210
2211         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2212                 return 0;
2213
2214         if (amdgpu_ras_query_error_status(adev, &info) != 0)
                DRM_WARN("RAS init harvest failure\n");
2216
2217         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
                DRM_WARN("RAS init harvest reset failure\n");
2219
2220         return 0;
2221 }
2222
/* Helper function to handle the common stuff in the IP late-init phase */
2224 int amdgpu_ras_late_init(struct amdgpu_device *adev,
2225                          struct ras_common_if *ras_block,
2226                          struct ras_fs_if *fs_info,
2227                          struct ras_ih_if *ih_info)
2228 {
2229         int r;
2230
2231         /* disable RAS feature per IP block if it is not supported */
2232         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2233                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2234                 return 0;
2235         }
2236
2237         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2238         if (r) {
2239                 if (r == -EAGAIN) {
                        /* request a GPU reset; this will run again */
2241                         amdgpu_ras_request_reset_on_boot(adev,
2242                                         ras_block->block);
2243                         return 0;
                } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
                        /* In the resume phase, if enabling RAS fails, clean
                         * up all RAS fs nodes and disable RAS */
                        goto cleanup;
                } else {
                        return r;
                }
2250         }
2251
        /* Check for errors on warm reset for ASICs that support persistent EDC harvesting */
2253         amdgpu_persistent_edc_harvesting(adev, ras_block);
2254
        /* in the resume phase, there is no need to create RAS fs nodes */
2256         if (adev->in_suspend || amdgpu_in_reset(adev))
2257                 return 0;
2258
2259         if (ih_info->cb) {
2260                 r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
2261                 if (r)
2262                         goto interrupt;
2263         }
2264
2265         r = amdgpu_ras_sysfs_create(adev, fs_info);
2266         if (r)
2267                 goto sysfs;
2268
2269         return 0;
2270 cleanup:
2271         amdgpu_ras_sysfs_remove(adev, ras_block);
2272 sysfs:
2273         if (ih_info->cb)
2274                 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2275 interrupt:
2276         amdgpu_ras_feature_enable(adev, ras_block, 0);
2277         return r;
2278 }
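
/*
 * Illustrative sketch (not part of the driver): an IP block's late-init
 * code typically calls the helper above like this, assuming a hypothetical
 * my_ip_ras_cb() interrupt callback and a block-local ras_common_if
 * (ras_if):
 *
 *	struct ras_fs_if fs_info = {
 *		.sysfs_name = "my_ip_err_count",
 *	};
 *	struct ras_ih_if ih_info = {
 *		.cb = my_ip_ras_cb,
 *	};
 *
 *	ih_info.head = fs_info.head = *ras_if;
 *	r = amdgpu_ras_late_init(adev, ras_if, &fs_info, &ih_info);
 *
 * On teardown, amdgpu_ras_late_fini() takes the same ras_if/ih_info pair.
 */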
2279
2280 /* helper function to remove ras fs node and interrupt handler */
2281 void amdgpu_ras_late_fini(struct amdgpu_device *adev,
2282                           struct ras_common_if *ras_block,
2283                           struct ras_ih_if *ih_info)
2284 {
2285         if (!ras_block || !ih_info)
2286                 return;
2287
2288         amdgpu_ras_sysfs_remove(adev, ras_block);
2289         if (ih_info->cb)
2290                 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2291         amdgpu_ras_feature_enable(adev, ras_block, 0);
2292 }
2293
/* Do some init work after IP late init, as a dependence.
 * It runs in the resume/GPU-reset/boot-up cases.
 */
2297 void amdgpu_ras_resume(struct amdgpu_device *adev)
2298 {
2299         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2300         struct ras_manager *obj, *tmp;
2301
2302         if (!adev->ras_enabled || !con) {
                /* clean the RAS context for VEGA20 Gaming after sending the RAS disable cmd */
2304                 amdgpu_release_ras_context(adev);
2305
2306                 return;
2307         }
2308
2309         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                /* Set up all other IPs which are not implemented. One
                 * tricky point: an IP's actual RAS error type should be
                 * MULTI_UNCORRECTABLE, but since the driver does not handle
                 * that, ERROR_NONE makes sense anyway.
                 */
2315                 amdgpu_ras_enable_all_features(adev, 1);
2316
                /* We enable RAS on all hw_supported blocks, but the boot
                 * parameter might disable some of them, and one or more IPs
                 * may not be implemented yet. So we disable those on their
                 * behalf.
                 */
2321                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2322                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2323                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
                                /* there should not be any reference */
2325                                 WARN_ON(alive_obj(obj));
2326                         }
2327                 }
2328         }
2329
2330         if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
2331                 con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
                /* Set up the RAS obj state as disabled.
                 * This is for the init_by_vbios case.
                 * To enable RAS, just enable it the normal way.
                 * To disable it, first set up the RAS obj as enabled,
                 * then issue another TA disable cmd.
                 * See feature_enable_on_boot
                 */
2339                 amdgpu_ras_disable_all_features(adev, 1);
2340                 amdgpu_ras_reset_gpu(adev);
2341         }
2342 }
2343
2344 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2345 {
2346         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2347
2348         if (!adev->ras_enabled || !con)
2349                 return;
2350
2351         amdgpu_ras_disable_all_features(adev, 0);
2352         /* Make sure all ras objects are disabled. */
2353         if (con->features)
2354                 amdgpu_ras_disable_all_features(adev, 1);
2355 }
2356
/* Do some fini work before IP fini, as a dependence */
2358 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2359 {
2360         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2361
2362         if (!adev->ras_enabled || !con)
2363                 return 0;

        /* Need to disable RAS on all IPs here before IP [hw/sw] fini */
2367         amdgpu_ras_disable_all_features(adev, 0);
2368         amdgpu_ras_recovery_fini(adev);
2369         return 0;
2370 }
2371
2372 int amdgpu_ras_fini(struct amdgpu_device *adev)
2373 {
2374         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2375
2376         if (!adev->ras_enabled || !con)
2377                 return 0;
2378
2379         amdgpu_ras_fs_fini(adev);
2380         amdgpu_ras_interrupt_remove_all(adev);
2381
2382         WARN(con->features, "Feature mask is not cleared");
2383
2384         if (con->features)
2385                 amdgpu_ras_disable_all_features(adev, 1);
2386
2387         amdgpu_ras_set_context(adev, NULL);
2388         kfree(con);
2389
2390         return 0;
2391 }
2392
2393 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2394 {
2395         amdgpu_ras_check_supported(adev);
2396         if (!adev->ras_hw_enabled)
2397                 return;
2398
2399         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
                dev_info(adev->dev, "uncorrectable hardware error "
                        "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2402
2403                 amdgpu_ras_reset_gpu(adev);
2404         }
2405 }
2406
2407 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2408 {
2409         if (adev->asic_type == CHIP_VEGA20 &&
2410             adev->pm.fw_version <= 0x283400) {
2411                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2412                                 amdgpu_ras_intr_triggered();
2413         }
2414
2415         return false;
2416 }
2417
2418 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2419 {
2420         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2421
2422         if (!con)
2423                 return;
2424
2425         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2426                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2427                 amdgpu_ras_set_context(adev, NULL);
2428                 kfree(con);
2429         }
2430 }