/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"

static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
};

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
/* typical ECC bad page rate (1 bad page per 100MB VRAM) */
#define RAS_BAD_PAGE_RATE		(100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

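/*
 * Reserve one bad page directly, bypassing the normal UMC error flow.
 * Reached from debugfs via the "retire_page" op; an illustrative
 * invocation (the address below is only an example):
 *
 *   echo retire_page 0x1000 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */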
static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct eeprom_table_record err_rec;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));

	err_rec.address = address;
	err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec.ts = (uint64_t)ktime_get_real_seconds();
	err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;

	err_data.err_addr = &err_rec;
	err_data.err_addr_cnt = 1;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev);
	}

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_str(i)) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents; say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head, namely address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
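 *
 * A minimal user-space sketch of that path; hedged: the struct layout
 * and enum values must be copied from the driver headers, and the
 * device index 0 below is only an example:
 *
 * .. code-block:: c
 *
 *	// assumes struct ras_debug_if was copied from amdgpu_ras.h
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ras_debug_if data;
 *		int fd;
 *
 *		memset(&data, 0, sizeof(data));
 *		data.head.block = AMDGPU_RAS_BLOCK__UMC;	// example block
 *		data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *		data.op = 1;	// 0 = disable, 1 = enable, 2 = inject
 *
 *		fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, &data, sizeof(data));
 *		close(fd);
 *		return 0;
 *	}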
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
317  *      echo "inject  <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
318  *
319  * Where N, is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 * The block is one of: umc, sdma, gfx, etc.;
 *	see ras_block_string[] for details.
 * The error type is one of: ue, ce, where
 *	ue is multi-uncorrectable and
 *	ce is single-correctable.
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers; a leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
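 *	For example (the value is a bitmask over the RAS block IDs):
 *
 *	.. code-block:: bash
 *
 *		cat /sys/module/amdgpu/parameters/ras_mask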
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return -EINVAL;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
					"as bad before error injection!\n",
					data.inject.address);
			break;
		}

		/* data.inject.address is an offset rather than an absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return -EINVAL;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
		size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
			&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (ret == 1) {
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return -EIO;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
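 *
 * Each file can be read with a plain shell read; the card index and
 * block name below are illustrative:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count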
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (obj->adev->asic_type == CHIP_ALDEBARAN) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			DRM_WARN("Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
525                 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];
	/* already exists; don't create it again */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		obj = &con->objs[head->block];

		if (alive_obj(obj)) {
			WARN_ON(head->block != obj->head.block);
			return obj;
		}
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj)) {
				WARN_ON(i != obj->head.block);
				return obj;
			}
		}
	}

	return NULL;
}
/* obj end */

static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
					 const char *invoke_type,
					 const char *block_name,
					 enum ta_ras_status ret)
{
	switch (ret) {
	case TA_RAS_STATUS__SUCCESS:
		return;
	case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
		dev_warn(adev->dev,
			"RAS WARN: %s %s currently unavailable\n",
			invoke_type,
			block_name);
		break;
	default:
		dev_err(adev->dev,
			"RAS ERROR: %s %s error failed ret 0x%X\n",
			invoke_type,
			block_name,
			ret);
	}
}

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (!enable) {
		info->disable_features = (struct ta_ras_disable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	} else {
		info->enable_features = (struct ta_ras_enable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	}

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
		ret = 0;
		goto out;
	}

	if (!amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			amdgpu_ras_parse_status_code(adev,
						     enable ? "enable" : "disable",
						     ras_block_str(head->block),
						     (enum ta_ras_status)ret);
			if (ret == TA_RAS_STATUS__RESET_NEEDED)
				ret = -EAGAIN;
			else
				ret = -EINVAL;

			goto out;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
	ret = 0;
out:
	kfree(info);
	return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						ras_block_str(head->block));
			}
		} else {
			/* set up the object, then issue a ras TA disable cmd. */
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* the gfx block ras disable cmd must be sent to the ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;
	const enum amdgpu_ras_error_type default_ras_type =
		AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};
		strcpy(head.name, ras_block_str(i));
		if (bypass) {
			/*
			 * bypass psp. vbios enable ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
	struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int i;

	if (!obj)
		return -EINVAL;

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__UMC:
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_count)
			adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_address)
			adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		if (adev->sdma.funcs->query_ras_error_count) {
			for (i = 0; i < adev->sdma.num_instances; i++)
				adev->sdma.funcs->query_ras_error_count(adev, i,
									&err_data);
		}
		break;
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->query_ras_error_count)
			adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);

		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->query_ras_error_status)
			adev->gfx.ras_funcs->query_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->query_ras_error_count)
			adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);

		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->query_ras_error_status)
			adev->mmhub.ras_funcs->query_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->query_ras_error_count)
			adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		if (adev->gmc.xgmi.ras_funcs &&
		    adev->gmc.xgmi.ras_funcs->query_ras_error_count)
			adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__HDP:
		if (adev->hdp.ras_funcs &&
		    adev->hdp.ras_funcs->query_ras_error_count)
			adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
		break;
	default:
		break;
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ce_count,
					ras_block_str(info->head.block));
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
					"detected in %s block, no user "
					"action is needed.\n",
					obj->err_data.ce_count,
					ras_block_str(info->head.block));
		}
	}
	if (err_data.ue_count) {
		if (adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id &&
		    adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
					"%ld uncorrectable hardware errors "
					"detected in %s block\n",
					adev->smuio.funcs->get_socket_id(adev),
					adev->smuio.funcs->get_die_id(adev),
					obj->err_data.ue_count,
					ras_block_str(info->head.block));
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
					"detected in %s block\n",
					obj->err_data.ue_count,
					ras_block_str(info->head.block));
		}
	}

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	switch (block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->reset_ras_error_count)
			adev->gfx.ras_funcs->reset_ras_error_count(adev);

		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->reset_ras_error_status)
			adev->gfx.ras_funcs->reset_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->reset_ras_error_count)
			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		if (adev->sdma.funcs->reset_ras_error_count)
			adev->sdma.funcs->reset_ras_error_count(adev);
		break;
	case AMDGPU_RAS_BLOCK__HDP:
		if (adev->hdp.ras_funcs &&
		    adev->hdp.ras_funcs->reset_ras_error_count)
			adev->hdp.ras_funcs->reset_ras_error_count(adev);
		break;
	default:
		break;
	}

	return 0;
}

/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
				 struct ta_ras_trigger_error_input *block_info)
{
	int ret;

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret = psp_ras_trigger_error(&adev->psp, block_info);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = 0;

	if (!obj)
		return -EINVAL;

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.ras_funcs &&
		    adev->gfx.ras_funcs->ras_error_inject)
			ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
		else
			ret = -EINVAL;
		break;
	case AMDGPU_RAS_BLOCK__UMC:
	case AMDGPU_RAS_BLOCK__SDMA:
	case AMDGPU_RAS_BLOCK__MMHUB:
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		ret = psp_ras_trigger_error(&adev->psp, &block_info);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
		break;
	default:
		dev_info(adev->dev, "%s error injection is not supported yet\n",
			 ras_block_str(info->head.block));
		ret = -EINVAL;
	}

	amdgpu_ras_parse_status_code(adev,
				     "inject",
				     ras_block_str(info->head.block),
				     (enum ta_ras_status)ret);

	return ret;
}

/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
		bool is_ce)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		if (amdgpu_ras_query_error_status(adev, &info))
			return 0;

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;
	}

	return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
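 * The whole list can be dumped with a plain read; the card index is
 * illustrative:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *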
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.sysfs_name,
			head->sysfs_name,
			sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct drm_minor  *minor = adev_to_drm(adev)->primary;
	struct dentry     *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to the known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides a direct way
	 * for the user to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * The user can set this so that the hardware error count registers
	 * of the RAS IPs are not cleaned up during ras recovery.
	 */
1329         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1330                             &con->disable_ras_err_cnt_harvest);
1331         return dir;
1332 }
1333
1334 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1335                                       struct ras_fs_if *head,
1336                                       struct dentry *dir)
1337 {
1338         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1339
1340         if (!obj || !dir)
1341                 return;
1342
1343         get_obj(obj);
1344
1345         memcpy(obj->fs_data.debugfs_name,
1346                         head->debugfs_name,
1347                         sizeof(obj->fs_data.debugfs_name));
1348
1349         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1350                             obj, &amdgpu_ras_debugfs_ops);
1351 }
1352
1353 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1354 {
1355         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1356         struct dentry *dir;
1357         struct ras_manager *obj;
1358         struct ras_fs_if fs_info;
1359
1360         /*
1361          * it won't be called in resume path, no need to check
1362          * suspend and gpu reset status
1363          */
1364         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1365                 return;
1366
1367         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1368
1369         list_for_each_entry(obj, &con->head, node) {
1370                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1371                         (obj->attr_inuse == 1)) {
1372                         sprintf(fs_info.debugfs_name, "%s_err_inject",
1373                                         ras_block_str(obj->head.block));
1374                         fs_info.head = obj->head;
1375                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1376                 }
1377         }
1378 }
1379
1380 /* debugfs end */
1381
1382 /* ras fs */
1383 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1384                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1385 static DEVICE_ATTR(features, S_IRUGO,
1386                 amdgpu_ras_sysfs_features_read, NULL);
1387 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1388 {
1389         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1390         struct attribute_group group = {
1391                 .name = RAS_FS_NAME,
1392         };
1393         struct attribute *attrs[] = {
1394                 &con->features_attr.attr,
1395                 NULL
1396         };
1397         struct bin_attribute *bin_attrs[] = {
1398                 NULL,
1399                 NULL,
1400         };
1401         int r;
1402
1403         /* add features entry */
1404         con->features_attr = dev_attr_features;
1405         group.attrs = attrs;
1406         sysfs_attr_init(attrs[0]);
1407
1408         if (amdgpu_bad_page_threshold != 0) {
1409                 /* add bad_page_features entry */
1410                 bin_attr_gpu_vram_bad_pages.private = NULL;
1411                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1412                 bin_attrs[0] = &con->badpages_attr;
1413                 group.bin_attrs = bin_attrs;
1414                 sysfs_bin_attr_init(bin_attrs[0]);
1415         }
1416
1417         r = sysfs_create_group(&adev->dev->kobj, &group);
1418         if (r)
1419                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1420
1421         return 0;
1422 }
1423
1424 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1425 {
1426         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1427         struct ras_manager *con_obj, *ip_obj, *tmp;
1428
1429         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1430                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1431                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1432                         if (ip_obj)
1433                                 put_obj(ip_obj);
1434                 }
1435         }
1436
1437         amdgpu_ras_sysfs_remove_all(adev);
1438         return 0;
1439 }
1440 /* ras fs end */
1441
1442 /* ih begin */
1443 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1444 {
1445         struct ras_ih_data *data = &obj->ih_data;
1446         struct amdgpu_iv_entry entry;
1447         int ret;
1448         struct ras_err_data err_data = {0, 0, 0, NULL};
1449
1450         while (data->rptr != data->wptr) {
1451                 rmb();
1452                 memcpy(&entry, &data->ring[data->rptr],
1453                                 data->element_size);
1454
1455                 wmb();
1456                 data->rptr = (data->aligned_element_size +
1457                                 data->rptr) % data->ring_size;
1458
1459                 /* Let IP handle its data, maybe we need get the output
1460                  * from the callback to udpate the error type/count, etc
1461                  */
1462                 if (data->cb) {
1463                         ret = data->cb(obj->adev, &err_data, &entry);
1464                         /* A ue will trigger an interrupt, and in that
1465                          * case we need a reset to recover the whole
1466                          * system. But leave that recovery to the IP;
1467                          * here we just dispatch the error.
1468                          */
1469                         if (ret == AMDGPU_RAS_SUCCESS) {
1470                                 /* these counts could be left as 0 if
1471                                  * some blocks do not count the number of errors
1472                                  */
1473                                 obj->err_data.ue_count += err_data.ue_count;
1474                                 obj->err_data.ce_count += err_data.ce_count;
1475                         }
1476                 }
1477         }
1478 }
1479
1480 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1481 {
1482         struct ras_ih_data *data =
1483                 container_of(work, struct ras_ih_data, ih_work);
1484         struct ras_manager *obj =
1485                 container_of(data, struct ras_manager, ih_data);
1486
1487         amdgpu_ras_interrupt_handler(obj);
1488 }
1489
1490 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1491                 struct ras_dispatch_if *info)
1492 {
1493         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1494         struct ras_ih_data *data;
1495
1496         if (!obj)
1497                 return -EINVAL;
1498         data = &obj->ih_data;
1499         if (data->inuse == 0)
1500                 return 0;
1501
1502         /* Note: the ring might overflow... */
1503         memcpy(&data->ring[data->wptr], info->entry,
1504                         data->element_size);
1505
1506         wmb();
1507         data->wptr = (data->aligned_element_size +
1508                         data->wptr) % data->ring_size;
1509
1510         schedule_work(&data->ih_work);
1511
1512         return 0;
1513 }
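/*
 * A minimal caller sketch (hypothetical): an IP interrupt service routine
 * would wrap its iv entry in a ras_dispatch_if and hand it off here, so the
 * work item above drains the ring outside interrupt context. ras_if and
 * entry stand in for the caller's own data:
 *
 *	struct ras_dispatch_if info = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *	amdgpu_ras_interrupt_dispatch(adev, &info);
 */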
1514
1515 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1516                 struct ras_ih_if *info)
1517 {
1518         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1519         struct ras_ih_data *data;
1520
1521         if (!obj)
1522                 return -EINVAL;
1523
1524         data = &obj->ih_data;
1525         if (data->inuse == 0)
1526                 return 0;
1527
1528         cancel_work_sync(&data->ih_work);
1529
1530         kfree(data->ring);
1531         memset(data, 0, sizeof(*data));
1532         put_obj(obj);
1533
1534         return 0;
1535 }
1536
1537 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1538                 struct ras_ih_if *info)
1539 {
1540         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1541         struct ras_ih_data *data;
1542
1543         if (!obj) {
1544                 /* in case we register the IH before enabling the ras feature */
1545                 obj = amdgpu_ras_create_obj(adev, &info->head);
1546                 if (!obj)
1547                         return -EINVAL;
1548         } else
1549                 get_obj(obj);
1550
1551         data = &obj->ih_data;
1552         /* add the callback, etc. */
1553         *data = (struct ras_ih_data) {
1554                 .inuse = 0,
1555                 .cb = info->cb,
1556                 .element_size = sizeof(struct amdgpu_iv_entry),
1557                 .rptr = 0,
1558                 .wptr = 0,
1559         };
1560
1561         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1562
1563         data->aligned_element_size = ALIGN(data->element_size, 8);
1564         /* the ring can store 64 iv entries. */
1565         data->ring_size = 64 * data->aligned_element_size;
1566         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1567         if (!data->ring) {
1568                 put_obj(obj);
1569                 return -ENOMEM;
1570         }
1571
1572         /* IH is ready */
1573         data->inuse = 1;
1574
1575         return 0;
1576 }
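/*
 * Registration sketch (illustrative; ip_process_ras_data_cb is a
 * placeholder for a block-specific callback): an IP block would typically
 * register from its ras late init path:
 *
 *	struct ras_ih_if ih_info = {
 *		.head = *ras_block,
 *		.cb = ip_process_ras_data_cb,
 *	};
 *	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
 */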
1577
1578 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1579 {
1580         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1581         struct ras_manager *obj, *tmp;
1582
1583         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1584                 struct ras_ih_if info = {
1585                         .head = obj->head,
1586                 };
1587                 amdgpu_ras_interrupt_remove_handler(adev, &info);
1588         }
1589
1590         return 0;
1591 }
1592 /* ih end */
1593
1594 /* traverse all IPs except NBIO to query error counters */
1595 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1596 {
1597         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1598         struct ras_manager *obj;
1599
1600         if (!adev->ras_enabled || !con)
1601                 return;
1602
1603         list_for_each_entry(obj, &con->head, node) {
1604                 struct ras_query_if info = {
1605                         .head = obj->head,
1606                 };
1607
1608                 /*
1609                  * The PCIE_BIF IP has a separate isr for the ras
1610                  * controller interrupt, and the block-specific ras
1611                  * counter query is done in that isr. So skip this
1612                  * block in the common sync-flood interrupt isr path.
1613                  */
1614                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1615                         continue;
1616
1617                 amdgpu_ras_query_error_status(adev, &info);
1618         }
1619 }
1620
1621 /* Parse RdRspStatus and WrRspStatus */
1622 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1623                                           struct ras_query_if *info)
1624 {
1625         /*
1626          * Only two blocks need to query the read/write
1627          * RspStatus at the current state
1628          */
1629         switch (info->head.block) {
1630         case AMDGPU_RAS_BLOCK__GFX:
1631                 if (adev->gfx.ras_funcs &&
1632                     adev->gfx.ras_funcs->query_ras_error_status)
1633                         adev->gfx.ras_funcs->query_ras_error_status(adev);
1634                 break;
1635         case AMDGPU_RAS_BLOCK__MMHUB:
1636                 if (adev->mmhub.ras_funcs &&
1637                     adev->mmhub.ras_funcs->query_ras_error_status)
1638                         adev->mmhub.ras_funcs->query_ras_error_status(adev);
1639                 break;
1640         default:
1641                 break;
1642         }
1643 }
1644
1645 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1646 {
1647         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1648         struct ras_manager *obj;
1649
1650         if (!adev->ras_enabled || !con)
1651                 return;
1652
1653         list_for_each_entry(obj, &con->head, node) {
1654                 struct ras_query_if info = {
1655                         .head = obj->head,
1656                 };
1657
1658                 amdgpu_ras_error_status_query(adev, &info);
1659         }
1660 }
1661
1662 /* recovery begin */
1663
1664 /* return 0 on success.
1665  * the caller must free bps.
1666  */
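/*
 * Each returned ras_badpage carries a status flag: RESERVED when the page
 * was reserved successfully, PENDING when the vram manager reports the
 * reservation as still in flight (-EBUSY), and FAULT when it failed
 * (-ENOENT), mirroring the page-status query in the loop below.
 */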
1667 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1668                 struct ras_badpage **bps, unsigned int *count)
1669 {
1670         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1671         struct ras_err_handler_data *data;
1672         int i = 0;
1673         int ret = 0, status;
1674
1675         if (!con || !con->eh_data || !bps || !count)
1676                 return -EINVAL;
1677
1678         mutex_lock(&con->recovery_lock);
1679         data = con->eh_data;
1680         if (!data || data->count == 0) {
1681                 *bps = NULL;
1682                 ret = -EINVAL;
1683                 goto out;
1684         }
1685
1686         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1687         if (!*bps) {
1688                 ret = -ENOMEM;
1689                 goto out;
1690         }
1691
1692         for (; i < data->count; i++) {
1693                 (*bps)[i] = (struct ras_badpage){
1694                         .bp = data->bps[i].retired_page,
1695                         .size = AMDGPU_GPU_PAGE_SIZE,
1696                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1697                 };
1698                 status = amdgpu_vram_mgr_query_page_status(
1699                                 ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1700                                 data->bps[i].retired_page);
1701                 if (status == -EBUSY)
1702                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1703                 else if (status == -ENOENT)
1704                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1705         }
1706
1707         *count = data->count;
1708 out:
1709         mutex_unlock(&con->recovery_lock);
1710         return ret;
1711 }
1712
1713 static void amdgpu_ras_do_recovery(struct work_struct *work)
1714 {
1715         struct amdgpu_ras *ras =
1716                 container_of(work, struct amdgpu_ras, recovery_work);
1717         struct amdgpu_device *remote_adev = NULL;
1718         struct amdgpu_device *adev = ras->adev;
1719         struct list_head device_list, *device_list_handle =  NULL;
1720
1721         if (!ras->disable_ras_err_cnt_harvest) {
1722                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1723
1724                 /* Build list of devices to query RAS related errors */
1725                 if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1726                         device_list_handle = &hive->device_list;
1727                 } else {
1728                         INIT_LIST_HEAD(&device_list);
1729                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
1730                         device_list_handle = &device_list;
1731                 }
1732
1733                 list_for_each_entry(remote_adev,
1734                                 device_list_handle, gmc.xgmi.head) {
1735                         amdgpu_ras_query_err_status(remote_adev);
1736                         amdgpu_ras_log_on_err_counter(remote_adev);
1737                 }
1738
1739                 amdgpu_put_xgmi_hive(hive);
1740         }
1741
1742         if (amdgpu_device_should_recover_gpu(ras->adev))
1743                 amdgpu_device_gpu_recover(ras->adev, NULL);
1744         atomic_set(&ras->in_recovery, 0);
1745 }
1746
1747 /* alloc/realloc bps array */
1748 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1749                 struct ras_err_handler_data *data, int pages)
1750 {
1751         unsigned int old_space = data->count + data->space_left;
1752         unsigned int new_space = old_space + pages;
1753         unsigned int align_space = ALIGN(new_space, 512);
1754         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1755
1756         if (!bps)
1757                 return -ENOMEM;
1760
1761         if (data->bps) {
1762                 memcpy(bps, data->bps,
1763                                 data->count * sizeof(*data->bps));
1764                 kfree(data->bps);
1765         }
1766
1767         data->bps = bps;
1768         data->space_left += align_space - old_space;
1769         return 0;
1770 }
1771
1772 /* it deals with vram only. */
1773 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1774                 struct eeprom_table_record *bps, int pages)
1775 {
1776         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1777         struct ras_err_handler_data *data;
1778         int ret = 0;
1779         uint32_t i;
1780
1781         if (!con || !con->eh_data || !bps || pages <= 0)
1782                 return 0;
1783
1784         mutex_lock(&con->recovery_lock);
1785         data = con->eh_data;
1786         if (!data)
1787                 goto out;
1788
1789         for (i = 0; i < pages; i++) {
1790                 if (amdgpu_ras_check_bad_page_unlock(con,
1791                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
1792                         continue;
1793
1794                 if (!data->space_left &&
1795                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
1796                         ret = -ENOMEM;
1797                         goto out;
1798                 }
1799
1800                 amdgpu_vram_mgr_reserve_range(
1801                         ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
1802                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
1803                         AMDGPU_GPU_PAGE_SIZE);
1804
1805                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
1806                 data->count++;
1807                 data->space_left--;
1808         }
1809 out:
1810         mutex_unlock(&con->recovery_lock);
1811
1812         return ret;
1813 }
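/*
 * Typical retirement flow (a sketch, not the only caller): a UMC error
 * handler that has translated error addresses into eeprom_table_record
 * entries would do roughly the following, with err_rec and num_recs
 * standing in for its own data:
 *
 *	amdgpu_ras_add_bad_pages(adev, err_rec, num_recs);
 *	amdgpu_ras_save_bad_pages(adev);
 *	amdgpu_ras_reset_gpu(adev);
 */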
1814
1815 /*
1816  * write the error record array to eeprom; the function should be
1817  * protected by recovery_lock
1818  */
1819 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
1820 {
1821         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1822         struct ras_err_handler_data *data;
1823         struct amdgpu_ras_eeprom_control *control;
1824         int save_count;
1825
1826         if (!con || !con->eh_data)
1827                 return 0;
1828
1829         control = &con->eeprom_control;
1830         data = con->eh_data;
1831         save_count = data->count - control->num_recs;
1832         /* only new entries are saved */
1833         if (save_count > 0) {
1834                 if (amdgpu_ras_eeprom_process_recods(control,
1835                                                         &data->bps[control->num_recs],
1836                                                         true,
1837                                                         save_count)) {
1838                         dev_err(adev->dev, "Failed to save EEPROM table data!\n");
1839                         return -EIO;
1840                 }
1841
1842                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
1843         }
1844
1845         return 0;
1846 }
1847
1848 /*
1849  * read the error record array from eeprom and reserve enough space
1850  * to store new bad pages
1851  */
1852 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
1853 {
1854         struct amdgpu_ras_eeprom_control *control =
1855                                         &adev->psp.ras.ras->eeprom_control;
1856         struct eeprom_table_record *bps = NULL;
1857         int ret = 0;
1858
1859         /* no bad page record, skip eeprom access */
1860         if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
1861                 return ret;
1862
1863         bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
1864         if (!bps)
1865                 return -ENOMEM;
1866
1867         if (amdgpu_ras_eeprom_process_recods(control, bps, false,
1868                 control->num_recs)) {
1869                 dev_err(adev->dev, "Failed to load EEPROM table records!\n");
1870                 ret = -EIO;
1871                 goto out;
1872         }
1873
1874         ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
1875
1876 out:
1877         kfree(bps);
1878         return ret;
1879 }
1880
1881 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
1882                                 uint64_t addr)
1883 {
1884         struct ras_err_handler_data *data = con->eh_data;
1885         int i;
1886
1887         addr >>= AMDGPU_GPU_PAGE_SHIFT;
1888         for (i = 0; i < data->count; i++)
1889                 if (addr == data->bps[i].retired_page)
1890                         return true;
1891
1892         return false;
1893 }
1894
1895 /*
1896  * check whether an address belongs to a bad page
1897  *
1898  * Note: this check is only for the umc block
1899  */
1900 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
1901                                 uint64_t addr)
1902 {
1903         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1904         bool ret = false;
1905
1906         if (!con || !con->eh_data)
1907                 return ret;
1908
1909         mutex_lock(&con->recovery_lock);
1910         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
1911         mutex_unlock(&con->recovery_lock);
1912         return ret;
1913 }
1914
1915 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
1916                                         uint32_t max_length)
1917 {
1918         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1919         int tmp_threshold = amdgpu_bad_page_threshold;
1920         u64 val;
1921
1922         /*
1923          * Justification of the bad_page_cnt_threshold value in the ras
1924          * structure.
1925          *
1926          * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
1927          * in eeprom, which introduces two scenarios:
1928          *
1929          * Bad page retirement enabled:
1930          *    - If amdgpu_bad_page_threshold = -1, bad_page_cnt_threshold
1931          *      is derived from the typical-rate formula below.
1932          *
1933          *    - If the user's value satisfies 0 < amdgpu_bad_page_threshold
1934          *      < max record length in eeprom, use it directly.
1935          *
1936          * Bad page retirement disabled:
1937          *    - If amdgpu_bad_page_threshold = 0, bad page retirement is
1938          *      disabled and bad_page_cnt_threshold has no effect.
1939          */
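        /*
         * Worked example (illustrative): with 16 GB of VRAM and the typical
         * rate of one bad page per 100 MB, the -1 case below works out to
         * 17179869184 / 104857600 ~= 163 pages, clamped to max_length.
         */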
1940
1941         if (tmp_threshold < -1)
1942                 tmp_threshold = -1;
1943         else if (tmp_threshold > max_length)
1944                 tmp_threshold = max_length;
1945
1946         if (tmp_threshold == -1) {
1947                 val = adev->gmc.mc_vram_size;
1948                 do_div(val, RAS_BAD_PAGE_RATE);
1949                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
1950                                                 max_length);
1951         } else {
1952                 con->bad_page_cnt_threshold = tmp_threshold;
1953         }
1954 }
1955
1956 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
1957 {
1958         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1959         struct ras_err_handler_data **data;
1960         uint32_t max_eeprom_records_len = 0;
1961         bool exc_err_limit = false;
1962         int ret;
1963
1964         if (adev->ras_enabled && con)
1965                 data = &con->eh_data;
1966         else
1967                 return 0;
1968
1969         *data = kzalloc(sizeof(**data), GFP_KERNEL);
1970         if (!*data) {
1971                 ret = -ENOMEM;
1972                 goto out;
1973         }
1974
1975         mutex_init(&con->recovery_lock);
1976         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
1977         atomic_set(&con->in_recovery, 0);
1978         con->adev = adev;
1979
1980         max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
1981         amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
1982
1983         /* Todo: during testing, the SMU might fail to read the eeprom
1984          * through I2C when the GPU is pending an XGMI reset at probe time
1985          * (mostly after the second bus reset); skip it for now
1986          */
1987         if (adev->gmc.xgmi.pending_reset)
1988                 return 0;
1989         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
1990         /*
1991          * This call fails when exc_err_limit is true or
1992          * ret != 0.
1993          */
1994         if (exc_err_limit || ret)
1995                 goto free;
1996
1997         if (con->eeprom_control.num_recs) {
1998                 ret = amdgpu_ras_load_bad_pages(adev);
1999                 if (ret)
2000                         goto free;
2001         }
2002
2003         return 0;
2004
2005 free:
2006         kfree((*data)->bps);
2007         kfree(*data);
2008         con->eh_data = NULL;
2009 out:
2010         dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
2011
2012         /*
2013          * Except for the error-threshold-exceeded case, other failures in
2014          * this function do not fail amdgpu driver init.
2015          */
2016         if (!exc_err_limit)
2017                 ret = 0;
2018         else
2019                 ret = -EINVAL;
2020
2021         return ret;
2022 }
2023
2024 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2025 {
2026         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2027         struct ras_err_handler_data *data = con->eh_data;
2028
2029         /* if recovery_init failed to init it, fini is useless */
2030         if (!data)
2031                 return 0;
2032
2033         cancel_work_sync(&con->recovery_work);
2034
2035         mutex_lock(&con->recovery_lock);
2036         con->eh_data = NULL;
2037         kfree(data->bps);
2038         kfree(data);
2039         mutex_unlock(&con->recovery_lock);
2040
2041         return 0;
2042 }
2043 /* recovery end */
2044
2045 /* return 0 if ras will reset the gpu and repost. */
2046 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
2047                 unsigned int block)
2048 {
2049         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2050
2051         if (!ras)
2052                 return -EINVAL;
2053
2054         ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
2055         return 0;
2056 }
2057
2058 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2059 {
2060         return adev->asic_type == CHIP_VEGA10 ||
2061                 adev->asic_type == CHIP_VEGA20 ||
2062                 adev->asic_type == CHIP_ARCTURUS ||
2063                 adev->asic_type == CHIP_ALDEBARAN ||
2064                 adev->asic_type == CHIP_SIENNA_CICHLID;
2065 }
2066
2067 /*
2068  * This is a workaround for the vega20 workstation sku:
2069  * force enable gfx ras and ignore the vbios gfx ras flag,
2070  * since GC EDC can not be written there.
2071  */
2072 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2073 {
2074         struct atom_context *ctx = adev->mode_info.atom_context;
2075
2076         if (!ctx)
2077                 return;
2078
2079         if (strnstr(ctx->vbios_version, "D16406",
2080                     sizeof(ctx->vbios_version)))
2081                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2082 }
2083
2084 /*
2085  * Check the hardware's ras ability, which is saved in hw_supported.
2086  * If the hardware does not support ras, we can skip some ras initialization
2087  * and forbid some ras operations from the IPs.
2088  * If software itself (say, a boot parameter) limits the ras ability, we
2089  * still need to allow the IPs some limited operations, like disable. In
2090  * that case we have to initialize ras as normal, but must check in each
2091  * function whether the operation is allowed.
2092  */
2093 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2094 {
2095         adev->ras_hw_enabled = adev->ras_enabled = 0;
2096
2097         if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
2098             !amdgpu_ras_asic_supported(adev))
2099                 return;
2100
2101         if (!adev->gmc.xgmi.connected_to_cpu) {
2102                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2103                         dev_info(adev->dev, "MEM ECC is active.\n");
2104                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2105                                                    1 << AMDGPU_RAS_BLOCK__DF);
2106                 } else {
2107                         dev_info(adev->dev, "MEM ECC is not present.\n");
2108                 }
2109
2110                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2111                         dev_info(adev->dev, "SRAM ECC is active.\n");
2112                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2113                                                     1 << AMDGPU_RAS_BLOCK__DF);
2114                 } else {
2115                         dev_info(adev->dev, "SRAM ECC is not present.\n");
2116                 }
2117         } else {
2118                 /* the driver only manages the RAS feature of a few IP
2119                  * blocks when the GPU is connected to the cpu through XGMI */
2120                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2121                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2122                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2123         }
2124
2125         amdgpu_ras_get_quirks(adev);
2126
2127         /* hw_supported needs to be aligned with RAS block mask. */
2128         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2129
2130         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2131                 adev->ras_hw_enabled & amdgpu_ras_mask;
2132 }
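/*
 * Example (illustrative): the masking above means ras can be trimmed from
 * the kernel command line without touching hardware detection, e.g.
 *
 *	amdgpu.ras_enable=0	forces ras_enabled to 0
 *	amdgpu.ras_mask=0x1	keeps only the first ras block enabled
 *
 * Treat the values as a sketch of the masking logic, not a tuning guide.
 */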
2133
2134 int amdgpu_ras_init(struct amdgpu_device *adev)
2135 {
2136         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2137         int r;
2138
2139         if (con)
2140                 return 0;
2141
2142         con = kzalloc(sizeof(struct amdgpu_ras) +
2143                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
2144                         GFP_KERNEL);
2145         if (!con)
2146                 return -ENOMEM;
2147
2148         con->objs = (struct ras_manager *)(con + 1);
2149
2150         amdgpu_ras_set_context(adev, con);
2151
2152         amdgpu_ras_check_supported(adev);
2153
2154         if (!adev->ras_hw_enabled || adev->asic_type == CHIP_VEGA10) {
2155                 /* set the gfx block ras context feature for VEGA20 Gaming;
2156                  * send a ras disable cmd to the ras ta during ras late init.
2157                  */
2158                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2159                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2160
2161                         return 0;
2162                 }
2163
2164                 r = 0;
2165                 goto release_con;
2166         }
2167
2168         con->features = 0;
2169         INIT_LIST_HEAD(&con->head);
2170         /* Might need to get this flag from vbios. */
2171         con->flags = RAS_DEFAULT_FLAGS;
2172
2173         /* initialize the nbio ras function ahead of any other
2174          * ras functions so that the hardware fatal error interrupt
2175          * can be enabled as early as possible */
2176         switch (adev->asic_type) {
2177         case CHIP_VEGA20:
2178         case CHIP_ARCTURUS:
2179         case CHIP_ALDEBARAN:
2180                 if (!adev->gmc.xgmi.connected_to_cpu)
2181                         adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
2182                 break;
2183         default:
2184                 /* nbio ras is not available */
2185                 break;
2186         }
2187
2188         if (adev->nbio.ras_funcs &&
2189             adev->nbio.ras_funcs->init_ras_controller_interrupt) {
2190                 r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
2191                 if (r)
2192                         goto release_con;
2193         }
2194
2195         if (adev->nbio.ras_funcs &&
2196             adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
2197                 r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
2198                 if (r)
2199                         goto release_con;
2200         }
2201
2202         if (amdgpu_ras_fs_init(adev)) {
2203                 r = -EINVAL;
2204                 goto release_con;
2205         }
2206
2207         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2208                  "hardware ability[%x] ras_mask[%x]\n",
2209                  adev->ras_hw_enabled, adev->ras_enabled);
2210
2211         return 0;
2212 release_con:
2213         amdgpu_ras_set_context(adev, NULL);
2214         kfree(con);
2215
2216         return r;
2217 }
2218
2219 static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2220 {
2221         if (adev->gmc.xgmi.connected_to_cpu)
2222                 return 1;
2223         return 0;
2224 }
2225
2226 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2227                                         struct ras_common_if *ras_block)
2228 {
2229         struct ras_query_if info = {
2230                 .head = *ras_block,
2231         };
2232
2233         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2234                 return 0;
2235
2236         if (amdgpu_ras_query_error_status(adev, &info) != 0)
2237                 DRM_WARN("RAS init harvest failure");
2238
2239         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2240                 DRM_WARN("RAS init harvest reset failure");
2241
2242         return 0;
2243 }
2244
2245 /* helper function to handle common stuff in ip late init phase */
2246 int amdgpu_ras_late_init(struct amdgpu_device *adev,
2247                          struct ras_common_if *ras_block,
2248                          struct ras_fs_if *fs_info,
2249                          struct ras_ih_if *ih_info)
2250 {
2251         int r;
2252
2253         /* disable RAS feature per IP block if it is not supported */
2254         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2255                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2256                 return 0;
2257         }
2258
2259         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2260         if (r) {
2261                 if (r == -EAGAIN) {
2262                         /* request a gpu reset; will run again */
2263                         amdgpu_ras_request_reset_on_boot(adev,
2264                                         ras_block->block);
2265                         return 0;
2266                 } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
2267                         /* in the resume phase, if we fail to enable ras,
2268                          * clean up all ras fs nodes and disable ras */
2269                         goto cleanup;
2270                 } else
2271                         return r;
2272         }
2273
2274         /* check for errors on ASICs that support persistent EDC harvesting across a warm reset */
2275         amdgpu_persistent_edc_harvesting(adev, ras_block);
2276
2277         /* in the resume phase, there is no need to create the ras fs node */
2278         if (adev->in_suspend || amdgpu_in_reset(adev))
2279                 return 0;
2280
2281         if (ih_info->cb) {
2282                 r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
2283                 if (r)
2284                         goto interrupt;
2285         }
2286
2287         r = amdgpu_ras_sysfs_create(adev, fs_info);
2288         if (r)
2289                 goto sysfs;
2290
2291         return 0;
2292 cleanup:
2293         amdgpu_ras_sysfs_remove(adev, ras_block);
2294 sysfs:
2295         if (ih_info->cb)
2296                 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2297 interrupt:
2298         amdgpu_ras_feature_enable(adev, ras_block, 0);
2299         return r;
2300 }
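/*
 * Usage sketch (hypothetical helper names, modeled on the gfx block): an IP
 * passes its persistent ras_if plus per-block fs/ih descriptors:
 *
 *	struct ras_fs_if fs_info = {
 *		.sysfs_name = "gfx_err_count",
 *	};
 *	struct ras_ih_if ih_info = {
 *		.cb = ip_process_ras_data_cb,
 *	};
 *
 *	fs_info.head = ih_info.head = *adev->gfx.ras_if;
 *	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
 *				 &fs_info, &ih_info);
 */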
2301
2302 /* helper function to remove ras fs node and interrupt handler */
2303 void amdgpu_ras_late_fini(struct amdgpu_device *adev,
2304                           struct ras_common_if *ras_block,
2305                           struct ras_ih_if *ih_info)
2306 {
2307         if (!ras_block || !ih_info)
2308                 return;
2309
2310         amdgpu_ras_sysfs_remove(adev, ras_block);
2311         if (ih_info->cb)
2312                 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
2313         amdgpu_ras_feature_enable(adev, ras_block, 0);
2314 }
2315
2316 /* do some init work after IP late init, as a dependence;
2317  * it runs in the resume / gpu reset / boot-up cases.
2318  */
2319 void amdgpu_ras_resume(struct amdgpu_device *adev)
2320 {
2321         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2322         struct ras_manager *obj, *tmp;
2323
2324         if (!adev->ras_enabled || !con) {
2325                 /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
2326                 amdgpu_release_ras_context(adev);
2327
2328                 return;
2329         }
2330
2331         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2332                 /* Set up all other IPs which are not implemented. One
2333                  * tricky point: an IP's actual ras error type should be
2334                  * MULTI_UNCORRECTABLE, but since the driver does not
2335                  * handle it, ERROR_NONE makes sense anyway.
2336                  */
2337                 amdgpu_ras_enable_all_features(adev, 1);
2338
2339                 /* We enable ras on all hw_supported blocks, but a boot
2340                  * parameter might disable some of them, and one or more IPs
2341                  * may not be implemented yet; we disable those on their behalf.
2342                  */
2343                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2344                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2345                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2346                                 /* there should not be any reference. */
2347                                 WARN_ON(alive_obj(obj));
2348                         }
2349                 }
2350         }
2351
2352         if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
2353                 con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
2354                 /* set up the ras obj state as disabled, for the
2355                  * init_by_vbios case.
2356                  * If we want to enable ras, just enable it in the normal way.
2357                  * If we want to disable it, we need to set up the ras obj as
2358                  * enabled, then issue another TA disable cmd.
2359                  * See feature_enable_on_boot.
2360                  */
2361                 amdgpu_ras_disable_all_features(adev, 1);
2362                 amdgpu_ras_reset_gpu(adev);
2363         }
2364 }
2365
2366 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2367 {
2368         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2369
2370         if (!adev->ras_enabled || !con)
2371                 return;
2372
2373         amdgpu_ras_disable_all_features(adev, 0);
2374         /* Make sure all ras objects are disabled. */
2375         if (con->features)
2376                 amdgpu_ras_disable_all_features(adev, 1);
2377 }
2378
2379 /* do some fini work before IP fini, as a dependence */
2380 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2381 {
2382         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2383
2384         if (!adev->ras_enabled || !con)
2385                 return 0;
2386
2387         /* Need to disable ras on all IPs here before IP [hw/sw] fini */
2388         amdgpu_ras_disable_all_features(adev, 0);
2389         amdgpu_ras_recovery_fini(adev);
2390         return 0;
2391 }
2392
2393 int amdgpu_ras_fini(struct amdgpu_device *adev)
2394 {
2395         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2396
2397         if (!adev->ras_enabled || !con)
2398                 return 0;
2399
2400         amdgpu_ras_fs_fini(adev);
2401         amdgpu_ras_interrupt_remove_all(adev);
2402
2403         WARN(con->features, "Feature mask is not cleared");
2404
2405         if (con->features)
2406                 amdgpu_ras_disable_all_features(adev, 1);
2407
2408         amdgpu_ras_set_context(adev, NULL);
2409         kfree(con);
2410
2411         return 0;
2412 }
2413
2414 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2415 {
2416         amdgpu_ras_check_supported(adev);
2417         if (!adev->ras_hw_enabled)
2418                 return;
2419
2420         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2421                 dev_info(adev->dev, "uncorrectable hardware error "
2422                         "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2423
2424                 amdgpu_ras_reset_gpu(adev);
2425         }
2426 }
2427
2428 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2429 {
2430         if (adev->asic_type == CHIP_VEGA20 &&
2431             adev->pm.fw_version <= 0x283400) {
2432                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2433                                 amdgpu_ras_intr_triggered();
2434         }
2435
2436         return false;
2437 }
2438
2439 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2440 {
2441         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2442
2443         if (!con)
2444                 return;
2445
2446         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2447                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2448                 amdgpu_ras_set_context(adev, NULL);
2449                 kfree(con);
2450         }
2451 }