drm/amdgpu: add generic pci reset as an option
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68
69 #include <linux/suspend.h>
70 #include <drm/task_barrier.h>
71 #include <linux/pm_runtime.h>
72
73 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
84
85 #define AMDGPU_RESUME_MS                2000
86
87 const char *amdgpu_asic_name[] = {
88         "TAHITI",
89         "PITCAIRN",
90         "VERDE",
91         "OLAND",
92         "HAINAN",
93         "BONAIRE",
94         "KAVERI",
95         "KABINI",
96         "HAWAII",
97         "MULLINS",
98         "TOPAZ",
99         "TONGA",
100         "FIJI",
101         "CARRIZO",
102         "STONEY",
103         "POLARIS10",
104         "POLARIS11",
105         "POLARIS12",
106         "VEGAM",
107         "VEGA10",
108         "VEGA12",
109         "VEGA20",
110         "RAVEN",
111         "ARCTURUS",
112         "RENOIR",
113         "NAVI10",
114         "NAVI14",
115         "NAVI12",
116         "SIENNA_CICHLID",
117         "NAVY_FLOUNDER",
118         "VANGOGH",
119         "DIMGREY_CAVEFISH",
120         "LAST",
121 };
122
123 /**
124  * DOC: pcie_replay_count
125  *
126  * The amdgpu driver provides a sysfs API for reporting the total number
127  * of PCIe replays (NAKs)
128  * The file pcie_replay_count is used for this and returns the total
129  * number of replays as a sum of the NAKs generated and NAKs received
130  */
131
132 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
133                 struct device_attribute *attr, char *buf)
134 {
135         struct drm_device *ddev = dev_get_drvdata(dev);
136         struct amdgpu_device *adev = drm_to_adev(ddev);
137         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
138
139         return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
140 }
141
142 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
143                 amdgpu_device_get_pcie_replay_count, NULL);
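
/*
 * Illustrative userspace read of this attribute (the card index is an
 * assumption and varies per system):
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 */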
144
145 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
146
147 /**
148  * DOC: product_name
149  *
150  * The amdgpu driver provides a sysfs API for reporting the product name
151  * for the device
152  * The file product_name is used for this and returns the product name
153  * as returned from the FRU.
154  * NOTE: This is only available for certain server cards
155  */
156
157 static ssize_t amdgpu_device_get_product_name(struct device *dev,
158                 struct device_attribute *attr, char *buf)
159 {
160         struct drm_device *ddev = dev_get_drvdata(dev);
161         struct amdgpu_device *adev = drm_to_adev(ddev);
162
163         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
164 }
165
166 static DEVICE_ATTR(product_name, S_IRUGO,
167                 amdgpu_device_get_product_name, NULL);
168
169 /**
170  * DOC: product_number
171  *
172  * The amdgpu driver provides a sysfs API for reporting the part number
173  * for the device
174  * The file product_number is used for this and returns the part number
175  * as returned from the FRU.
176  * NOTE: This is only available for certain server cards
177  */
178
179 static ssize_t amdgpu_device_get_product_number(struct device *dev,
180                 struct device_attribute *attr, char *buf)
181 {
182         struct drm_device *ddev = dev_get_drvdata(dev);
183         struct amdgpu_device *adev = drm_to_adev(ddev);
184
185         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
186 }
187
188 static DEVICE_ATTR(product_number, S_IRUGO,
189                 amdgpu_device_get_product_number, NULL);
190
191 /**
192  * DOC: serial_number
193  *
194  * The amdgpu driver provides a sysfs API for reporting the serial number
195  * for the device
196  * The file serial_number is used for this and returns the serial number
197  * as returned from the FRU.
198  * NOTE: This is only available for certain server cards
199  */
200
201 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
202                 struct device_attribute *attr, char *buf)
203 {
204         struct drm_device *ddev = dev_get_drvdata(dev);
205         struct amdgpu_device *adev = drm_to_adev(ddev);
206
207         return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
208 }
209
210 static DEVICE_ATTR(serial_number, S_IRUGO,
211                 amdgpu_device_get_serial_number, NULL);
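
/*
 * Illustrative userspace reads for the three FRU derived attributes above
 * (paths assume card0; on cards without FRU data the values may be empty
 * or placeholders):
 *
 *	cat /sys/class/drm/card0/device/product_name
 *	cat /sys/class/drm/card0/device/product_number
 *	cat /sys/class/drm/card0/device/serial_number
 */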
212
213 /**
214  * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
215  *
216  * @dev: drm_device pointer
217  *
218  * Returns true if the device is a dGPU with HG/PX power control,
219  * otherwise return false.
220  */
221 bool amdgpu_device_supports_atpx(struct drm_device *dev)
222 {
223         struct amdgpu_device *adev = drm_to_adev(dev);
224
225         if (adev->flags & AMD_IS_PX)
226                 return true;
227         return false;
228 }
229
230 /**
231  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
232  *
233  * @dev: drm_device pointer
234  *
235  * Returns true if the device is a dGPU with ACPI power resources (BOCO),
236  * otherwise return false.
237  */
238 bool amdgpu_device_supports_boco(struct drm_device *dev)
239 {
240         struct amdgpu_device *adev = drm_to_adev(dev);
241
242         if (adev->has_pr3)
243                 return true;
244         return false;
245 }
246
247 /**
248  * amdgpu_device_supports_baco - Does the device support BACO
249  *
250  * @dev: drm_device pointer
251  *
252  * Returns true if the device supports BACO,
253  * otherwise return false.
254  */
255 bool amdgpu_device_supports_baco(struct drm_device *dev)
256 {
257         struct amdgpu_device *adev = drm_to_adev(dev);
258
259         return amdgpu_asic_supports_baco(adev);
260 }
261
262 /*
263  * VRAM access helper functions
264  */
265
266 /**
267  * amdgpu_device_vram_access - read/write a buffer in vram
268  *
269  * @adev: amdgpu_device pointer
270  * @pos: offset of the buffer in vram
271  * @buf: virtual address of the buffer in system memory
272  * @size: read/write size in bytes, @buf must hold at least @size bytes
273  * @write: true - write to vram, otherwise - read from vram
274  */
275 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
276                                uint32_t *buf, size_t size, bool write)
277 {
278         unsigned long flags;
279         uint32_t hi = ~0;
280         uint64_t last;
281
282
283 #ifdef CONFIG_64BIT
284         last = min(pos + size, adev->gmc.visible_vram_size);
285         if (last > pos) {
286                 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
287                 size_t count = last - pos;
288
289                 if (write) {
290                         memcpy_toio(addr, buf, count);
291                         mb();
292                         amdgpu_asic_flush_hdp(adev, NULL);
293                 } else {
294                         amdgpu_asic_invalidate_hdp(adev, NULL);
295                         mb();
296                         memcpy_fromio(buf, addr, count);
297                 }
298
299                 if (count == size)
300                         return;
301
302                 pos += count;
303                 buf += count / 4;
304                 size -= count;
305         }
306 #endif
307
308         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
309         for (last = pos + size; pos < last; pos += 4) {
310                 uint32_t tmp = pos >> 31;
311
312                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
313                 if (tmp != hi) {
314                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
315                         hi = tmp;
316                 }
317                 if (write)
318                         WREG32_NO_KIQ(mmMM_DATA, *buf++);
319                 else
320                         *buf++ = RREG32_NO_KIQ(mmMM_DATA);
321         }
322         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
323 }
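
/*
 * Minimal usage sketch for the helper above (offset and length are
 * illustrative): read 32 bytes of VRAM starting at byte offset 0x1000 into
 * a dword buffer.  @size is in bytes and the copy is done dword by dword.
 *
 *	uint32_t data[8];
 *
 *	amdgpu_device_vram_access(adev, 0x1000, data, sizeof(data), false);
 */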
324
325 /*
326  * register access helper functions.
327  */
328 /**
329  * amdgpu_device_rreg - read a memory mapped IO or indirect register
330  *
331  * @adev: amdgpu_device pointer
332  * @reg: dword aligned register offset
333  * @acc_flags: access flags which require special behavior
334  *
335  * Returns the 32 bit value from the offset specified.
336  */
337 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
338                             uint32_t reg, uint32_t acc_flags)
339 {
340         uint32_t ret;
341
342         if (adev->in_pci_err_recovery)
343                 return 0;
344
345         if ((reg * 4) < adev->rmmio_size) {
346                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
347                     amdgpu_sriov_runtime(adev) &&
348                     down_read_trylock(&adev->reset_sem)) {
349                         ret = amdgpu_kiq_rreg(adev, reg);
350                         up_read(&adev->reset_sem);
351                 } else {
352                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
353                 }
354         } else {
355                 ret = adev->pcie_rreg(adev, reg * 4);
356         }
357
358         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
359
360         return ret;
361 }
362
363 /*
364  * MMIO register byte read helper function
365  * @offset: byte offset from MMIO start
366  *
367 */
368
369 /**
370  * amdgpu_mm_rreg8 - read a memory mapped IO register
371  *
372  * @adev: amdgpu_device pointer
373  * @offset: byte aligned register offset
374  *
375  * Returns the 8 bit value from the offset specified.
376  */
377 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
378 {
379         if (adev->in_pci_err_recovery)
380                 return 0;
381
382         if (offset < adev->rmmio_size)
383                 return (readb(adev->rmmio + offset));
384         BUG();
385 }
386
387 /*
388  * MMIO register byte write helper function
389  * @offset: byte offset from MMIO start
390  * @value: the value to be written to the register
391  *
392 */
393 /**
394  * amdgpu_mm_wreg8 - write a memory mapped IO register
395  *
396  * @adev: amdgpu_device pointer
397  * @offset: byte aligned register offset
398  * @value: 8 bit value to write
399  *
400  * Writes the value specified to the offset specified.
401  */
402 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
403 {
404         if (adev->in_pci_err_recovery)
405                 return;
406
407         if (offset < adev->rmmio_size)
408                 writeb(value, adev->rmmio + offset);
409         else
410                 BUG();
411 }
412
413 /**
414  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
415  *
416  * @adev: amdgpu_device pointer
417  * @reg: dword aligned register offset
418  * @v: 32 bit value to write to the register
419  * @acc_flags: access flags which require special behavior
420  *
421  * Writes the value specified to the offset specified.
422  */
423 void amdgpu_device_wreg(struct amdgpu_device *adev,
424                         uint32_t reg, uint32_t v,
425                         uint32_t acc_flags)
426 {
427         if (adev->in_pci_err_recovery)
428                 return;
429
430         if ((reg * 4) < adev->rmmio_size) {
431                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
432                     amdgpu_sriov_runtime(adev) &&
433                     down_read_trylock(&adev->reset_sem)) {
434                         amdgpu_kiq_wreg(adev, reg, v);
435                         up_read(&adev->reset_sem);
436                 } else {
437                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
438                 }
439         } else {
440                 adev->pcie_wreg(adev, reg * 4, v);
441         }
442
443         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
444 }
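
/*
 * Most code does not call amdgpu_device_rreg()/amdgpu_device_wreg()
 * directly; the RREG32()/WREG32() macro family in amdgpu.h wraps these
 * helpers with the appropriate access flags.  A small sketch:
 *
 *	u32 tmp = RREG32(reg_offset);
 *	WREG32(reg_offset, tmp | 0x1);
 */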
445
446 /*
447  * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
448  *
449  * this function is invoked only for debugfs register access
450  */
451 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
452                              uint32_t reg, uint32_t v)
453 {
454         if (adev->in_pci_err_recovery)
455                 return;
456
457         if (amdgpu_sriov_fullaccess(adev) &&
458             adev->gfx.rlc.funcs &&
459             adev->gfx.rlc.funcs->is_rlcg_access_range) {
460                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
461                         return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
462         } else {
463                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
464         }
465 }
466
467 /**
468  * amdgpu_io_rreg - read an IO register
469  *
470  * @adev: amdgpu_device pointer
471  * @reg: dword aligned register offset
472  *
473  * Returns the 32 bit value from the offset specified.
474  */
475 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
476 {
477         if (adev->in_pci_err_recovery)
478                 return 0;
479
480         if ((reg * 4) < adev->rio_mem_size)
481                 return ioread32(adev->rio_mem + (reg * 4));
482         else {
483                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
484                 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
485         }
486 }
487
488 /**
489  * amdgpu_io_wreg - write to an IO register
490  *
491  * @adev: amdgpu_device pointer
492  * @reg: dword aligned register offset
493  * @v: 32 bit value to write to the register
494  *
495  * Writes the value specified to the offset specified.
496  */
497 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
498 {
499         if (adev->in_pci_err_recovery)
500                 return;
501
502         if ((reg * 4) < adev->rio_mem_size)
503                 iowrite32(v, adev->rio_mem + (reg * 4));
504         else {
505                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
506                 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
507         }
508 }
509
510 /**
511  * amdgpu_mm_rdoorbell - read a doorbell dword
512  *
513  * @adev: amdgpu_device pointer
514  * @index: doorbell index
515  *
516  * Returns the value in the doorbell aperture at the
517  * requested doorbell index (CIK).
518  */
519 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
520 {
521         if (adev->in_pci_err_recovery)
522                 return 0;
523
524         if (index < adev->doorbell.num_doorbells) {
525                 return readl(adev->doorbell.ptr + index);
526         } else {
527                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
528                 return 0;
529         }
530 }
531
532 /**
533  * amdgpu_mm_wdoorbell - write a doorbell dword
534  *
535  * @adev: amdgpu_device pointer
536  * @index: doorbell index
537  * @v: value to write
538  *
539  * Writes @v to the doorbell aperture at the
540  * requested doorbell index (CIK).
541  */
542 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
543 {
544         if (adev->in_pci_err_recovery)
545                 return;
546
547         if (index < adev->doorbell.num_doorbells) {
548                 writel(v, adev->doorbell.ptr + index);
549         } else {
550                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
551         }
552 }
553
554 /**
555  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
556  *
557  * @adev: amdgpu_device pointer
558  * @index: doorbell index
559  *
560  * Returns the value in the doorbell aperture at the
561  * requested doorbell index (VEGA10+).
562  */
563 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
564 {
565         if (adev->in_pci_err_recovery)
566                 return 0;
567
568         if (index < adev->doorbell.num_doorbells) {
569                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
570         } else {
571                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
572                 return 0;
573         }
574 }
575
576 /**
577  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
578  *
579  * @adev: amdgpu_device pointer
580  * @index: doorbell index
581  * @v: value to write
582  *
583  * Writes @v to the doorbell aperture at the
584  * requested doorbell index (VEGA10+).
585  */
586 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
587 {
588         if (adev->in_pci_err_recovery)
589                 return;
590
591         if (index < adev->doorbell.num_doorbells) {
592                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
593         } else {
594                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
595         }
596 }
597
598 /**
599  * amdgpu_device_indirect_rreg - read an indirect register
600  *
601  * @adev: amdgpu_device pointer
602  * @pcie_index: mmio register offset
603  * @pcie_data: mmio register offset
604  * @reg_addr: indirect register address to read from
605  *
606  * Returns the value of indirect register @reg_addr
607  */
608 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
609                                 u32 pcie_index, u32 pcie_data,
610                                 u32 reg_addr)
611 {
612         unsigned long flags;
613         u32 r;
614         void __iomem *pcie_index_offset;
615         void __iomem *pcie_data_offset;
616
617         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
618         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
619         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
620
621         writel(reg_addr, pcie_index_offset);
622         readl(pcie_index_offset);
623         r = readl(pcie_data_offset);
624         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
625
626         return r;
627 }
628
629 /**
630  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
631  *
632  * @adev: amdgpu_device pointer
633  * @pcie_index: mmio register offset
634  * @pcie_data: mmio register offset
635  * @reg_addr: indirect register address to read from
636  *
637  * Returns the value of indirect register @reg_addr
638  */
639 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
640                                   u32 pcie_index, u32 pcie_data,
641                                   u32 reg_addr)
642 {
643         unsigned long flags;
644         u64 r;
645         void __iomem *pcie_index_offset;
646         void __iomem *pcie_data_offset;
647
648         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
649         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
650         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
651
652         /* read low 32 bits */
653         writel(reg_addr, pcie_index_offset);
654         readl(pcie_index_offset);
655         r = readl(pcie_data_offset);
656         /* read high 32 bits */
657         writel(reg_addr + 4, pcie_index_offset);
658         readl(pcie_index_offset);
659         r |= ((u64)readl(pcie_data_offset) << 32);
660         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
661
662         return r;
663 }
664
665 /**
666  * amdgpu_device_indirect_wreg - write an indirect register address
667  *
668  * @adev: amdgpu_device pointer
669  * @pcie_index: mmio register offset
670  * @pcie_data: mmio register offset
671  * @reg_addr: indirect register offset
672  * @reg_data: indirect register data
673  *
674  */
675 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
676                                  u32 pcie_index, u32 pcie_data,
677                                  u32 reg_addr, u32 reg_data)
678 {
679         unsigned long flags;
680         void __iomem *pcie_index_offset;
681         void __iomem *pcie_data_offset;
682
683         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
684         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
685         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
686
687         writel(reg_addr, pcie_index_offset);
688         readl(pcie_index_offset);
689         writel(reg_data, pcie_data_offset);
690         readl(pcie_data_offset);
691         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
692 }
693
694 /**
695  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
696  *
697  * @adev: amdgpu_device pointer
698  * @pcie_index: mmio register offset
699  * @pcie_data: mmio register offset
700  * @reg_addr: indirect register offset
701  * @reg_data: indirect register data
702  *
703  */
704 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
705                                    u32 pcie_index, u32 pcie_data,
706                                    u32 reg_addr, u64 reg_data)
707 {
708         unsigned long flags;
709         void __iomem *pcie_index_offset;
710         void __iomem *pcie_data_offset;
711
712         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
713         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
714         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
715
716         /* write low 32 bits */
717         writel(reg_addr, pcie_index_offset);
718         readl(pcie_index_offset);
719         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
720         readl(pcie_data_offset);
721         /* write high 32 bits */
722         writel(reg_addr + 4, pcie_index_offset);
723         readl(pcie_index_offset);
724         writel((u32)(reg_data >> 32), pcie_data_offset);
725         readl(pcie_data_offset);
726         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
727 }
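
/*
 * SoC specific code typically wires these indirect helpers into the
 * adev->pcie_rreg64/pcie_wreg64 style callbacks.  A minimal sketch (the
 * register names are placeholders, not taken from this file):
 *
 *	static u64 soc_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg64(adev, mmPCIE_INDEX2,
 *						     mmPCIE_DATA2, reg);
 *	}
 */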
728
729 /**
730  * amdgpu_invalid_rreg - dummy reg read function
731  *
732  * @adev: amdgpu_device pointer
733  * @reg: offset of register
734  *
735  * Dummy register read function.  Used for register blocks
736  * that certain asics don't have (all asics).
737  * Returns the value in the register.
738  */
739 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
740 {
741         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
742         BUG();
743         return 0;
744 }
745
746 /**
747  * amdgpu_invalid_wreg - dummy reg write function
748  *
749  * @adev: amdgpu_device pointer
750  * @reg: offset of register
751  * @v: value to write to the register
752  *
753  * Dummy register write function.  Used for register blocks
754  * that certain asics don't have (all asics).
755  */
756 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
757 {
758         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
759                   reg, v);
760         BUG();
761 }
762
763 /**
764  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
765  *
766  * @adev: amdgpu_device pointer
767  * @reg: offset of register
768  *
769  * Dummy register read function.  Used for register blocks
770  * that certain asics don't have (all asics).
771  * Returns the value in the register.
772  */
773 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
774 {
775         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
776         BUG();
777         return 0;
778 }
779
780 /**
781  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
782  *
783  * @adev: amdgpu_device pointer
784  * @reg: offset of register
785  * @v: value to write to the register
786  *
787  * Dummy register write function.  Used for register blocks
788  * that certain asics don't have (all asics).
789  */
790 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
791 {
792         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
793                   reg, v);
794         BUG();
795 }
796
797 /**
798  * amdgpu_block_invalid_rreg - dummy reg read function
799  *
800  * @adev: amdgpu_device pointer
801  * @block: offset of instance
802  * @reg: offset of register
803  *
804  * Dummy register read function.  Used for register blocks
805  * that certain asics don't have (all asics).
806  * Returns the value in the register.
807  */
808 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
809                                           uint32_t block, uint32_t reg)
810 {
811         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
812                   reg, block);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_block_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @block: offset of instance
822  * @reg: offset of register
823  * @v: value to write to the register
824  *
825  * Dummy register write function.  Used for register blocks
826  * that certain asics don't have (all asics).
827  */
828 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
829                                       uint32_t block,
830                                       uint32_t reg, uint32_t v)
831 {
832         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
833                   reg, block, v);
834         BUG();
835 }
836
837 /**
838  * amdgpu_device_asic_init - Wrapper for atom asic_init
839  *
840  * @adev: amdgpu_device pointer
841  *
842  * Does any asic specific work and then calls atom asic init.
843  */
844 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
845 {
846         amdgpu_asic_pre_asic_init(adev);
847
848         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
849 }
850
851 /**
852  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
853  *
854  * @adev: amdgpu_device pointer
855  *
856  * Allocates a scratch page of VRAM for use by various things in the
857  * driver.
858  */
859 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
860 {
861         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
862                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
863                                        &adev->vram_scratch.robj,
864                                        &adev->vram_scratch.gpu_addr,
865                                        (void **)&adev->vram_scratch.ptr);
866 }
867
868 /**
869  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
870  *
871  * @adev: amdgpu_device pointer
872  *
873  * Frees the VRAM scratch page.
874  */
875 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
876 {
877         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
878 }
879
880 /**
881  * amdgpu_device_program_register_sequence - program an array of registers.
882  *
883  * @adev: amdgpu_device pointer
884  * @registers: pointer to the register array
885  * @array_size: size of the register array
886  *
887  * Programs an array of registers with AND and OR masks.
888  * This is a helper for setting golden registers.
889  */
890 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
891                                              const u32 *registers,
892                                              const u32 array_size)
893 {
894         u32 tmp, reg, and_mask, or_mask;
895         int i;
896
897         if (array_size % 3)
898                 return;
899
900         for (i = 0; i < array_size; i += 3) {
901                 reg = registers[i + 0];
902                 and_mask = registers[i + 1];
903                 or_mask = registers[i + 2];
904
905                 if (and_mask == 0xffffffff) {
906                         tmp = or_mask;
907                 } else {
908                         tmp = RREG32(reg);
909                         tmp &= ~and_mask;
910                         if (adev->family >= AMDGPU_FAMILY_AI)
911                                 tmp |= (or_mask & and_mask);
912                         else
913                                 tmp |= or_mask;
914                 }
915                 WREG32(reg, tmp);
916         }
917 }
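
/*
 * The @registers array is consumed as {offset, and_mask, or_mask} triples.
 * A hypothetical golden settings table could look like this (register names
 * and values are placeholders):
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG_A, 0xffffffff, 0x00000001,
 *		mmEXAMPLE_REG_B, 0x0000ff00, 0x00002400,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *					ARRAY_SIZE(example_golden_settings));
 */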
918
919 /**
920  * amdgpu_device_pci_config_reset - reset the GPU
921  *
922  * @adev: amdgpu_device pointer
923  *
924  * Resets the GPU using the pci config reset sequence.
925  * Only applicable to asics prior to vega10.
926  */
927 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
928 {
929         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
930 }
931
932 /**
933  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
934  *
935  * @adev: amdgpu_device pointer
936  *
937  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
938  */
939 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
940 {
941         return pci_reset_function(adev->pdev);
942 }
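
/*
 * pci_reset_function() lets the PCI core pick a supported reset method
 * (typically FLR when available, otherwise falling back to other mechanisms
 * such as a bus reset) and saves/restores config space around the reset.
 */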
943
944 /*
945  * GPU doorbell aperture helper functions.
946  */
947 /**
948  * amdgpu_device_doorbell_init - Init doorbell driver information.
949  *
950  * @adev: amdgpu_device pointer
951  *
952  * Init doorbell driver information (CIK)
953  * Returns 0 on success, error on failure.
954  */
955 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
956 {
957
958         /* No doorbell on SI hardware generation */
959         if (adev->asic_type < CHIP_BONAIRE) {
960                 adev->doorbell.base = 0;
961                 adev->doorbell.size = 0;
962                 adev->doorbell.num_doorbells = 0;
963                 adev->doorbell.ptr = NULL;
964                 return 0;
965         }
966
967         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
968                 return -EINVAL;
969
970         amdgpu_asic_init_doorbell_index(adev);
971
972         /* doorbell bar mapping */
973         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
974         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
975
976         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
977                                              adev->doorbell_index.max_assignment+1);
978         if (adev->doorbell.num_doorbells == 0)
979                 return -EINVAL;
980
981         /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
982          * paging queue doorbells use the second page. The
983          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
984          * doorbells are in the first page. So with the paging queue enabled,
985          * num_doorbells needs to grow by one page (0x400 dwords).
986          */
987         if (adev->asic_type >= CHIP_VEGA10)
988                 adev->doorbell.num_doorbells += 0x400;
989
990         adev->doorbell.ptr = ioremap(adev->doorbell.base,
991                                      adev->doorbell.num_doorbells *
992                                      sizeof(u32));
993         if (adev->doorbell.ptr == NULL)
994                 return -ENOMEM;
995
996         return 0;
997 }
998
999 /**
1000  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1001  *
1002  * @adev: amdgpu_device pointer
1003  *
1004  * Tear down doorbell driver information (CIK)
1005  */
1006 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1007 {
1008         iounmap(adev->doorbell.ptr);
1009         adev->doorbell.ptr = NULL;
1010 }
1011
1012
1013
1014 /*
1015  * amdgpu_device_wb_*()
1016  * Writeback is the method by which the GPU updates special pages in memory
1017  * with the status of certain GPU events (fences, ring pointers, etc.).
1018  */
1019
1020 /**
1021  * amdgpu_device_wb_fini - Disable Writeback and free memory
1022  *
1023  * @adev: amdgpu_device pointer
1024  *
1025  * Disables Writeback and frees the Writeback memory (all asics).
1026  * Used at driver shutdown.
1027  */
1028 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1029 {
1030         if (adev->wb.wb_obj) {
1031                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1032                                       &adev->wb.gpu_addr,
1033                                       (void **)&adev->wb.wb);
1034                 adev->wb.wb_obj = NULL;
1035         }
1036 }
1037
1038 /**
1039  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1040  *
1041  * @adev: amdgpu_device pointer
1042  *
1043  * Initializes writeback and allocates writeback memory (all asics).
1044  * Used at driver startup.
1045  * Returns 0 on success or a negative error code on failure.
1046  */
1047 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1048 {
1049         int r;
1050
1051         if (adev->wb.wb_obj == NULL) {
1052                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1053                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1054                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1055                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1056                                             (void **)&adev->wb.wb);
1057                 if (r) {
1058                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1059                         return r;
1060                 }
1061
1062                 adev->wb.num_wb = AMDGPU_MAX_WB;
1063                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1064
1065                 /* clear wb memory */
1066                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1067         }
1068
1069         return 0;
1070 }
1071
1072 /**
1073  * amdgpu_device_wb_get - Allocate a wb entry
1074  *
1075  * @adev: amdgpu_device pointer
1076  * @wb: wb index
1077  *
1078  * Allocate a wb slot for use by the driver (all asics).
1079  * Returns 0 on success or -EINVAL on failure.
1080  */
1081 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1082 {
1083         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1084
1085         if (offset < adev->wb.num_wb) {
1086                 __set_bit(offset, adev->wb.used);
1087                 *wb = offset << 3; /* convert to dw offset */
1088                 return 0;
1089         } else {
1090                 return -EINVAL;
1091         }
1092 }
1093
1094 /**
1095  * amdgpu_device_wb_free - Free a wb entry
1096  *
1097  * @adev: amdgpu_device pointer
1098  * @wb: wb index
1099  *
1100  * Free a wb slot allocated for use by the driver (all asics)
1101  */
1102 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1103 {
1104         wb >>= 3;
1105         if (wb < adev->wb.num_wb)
1106                 __clear_bit(wb, adev->wb.used);
1107 }
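
/*
 * Allocation/free sketch (illustrative): the index returned by
 * amdgpu_device_wb_get() is a dword offset into adev->wb.wb, and the
 * matching GPU address is adev->wb.gpu_addr + index * 4.
 *
 *	u32 wb_idx;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb_idx)) {
 *		... GPU writes status, CPU reads adev->wb.wb[wb_idx] ...
 *		amdgpu_device_wb_free(adev, wb_idx);
 *	}
 */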
1108
1109 /**
1110  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1111  *
1112  * @adev: amdgpu_device pointer
1113  *
1114  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1115  * to fail, but if any of the BARs is not accessible after the resize we abort
1116  * driver loading by returning -ENODEV.
1117  */
1118 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1119 {
1120         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1121         struct pci_bus *root;
1122         struct resource *res;
1123         unsigned i;
1124         u16 cmd;
1125         int r;
1126
1127         /* Bypass for VF */
1128         if (amdgpu_sriov_vf(adev))
1129                 return 0;
1130
1131         /* skip if the bios has already enabled large BAR */
1132         if (adev->gmc.real_vram_size &&
1133             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1134                 return 0;
1135
1136         /* Check if the root BUS has 64bit memory resources */
1137         root = adev->pdev->bus;
1138         while (root->parent)
1139                 root = root->parent;
1140
1141         pci_bus_for_each_resource(root, res, i) {
1142                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1143                     res->start > 0x100000000ull)
1144                         break;
1145         }
1146
1147         /* Trying to resize is pointless without a root hub window above 4GB */
1148         if (!res)
1149                 return 0;
1150
1151         /* Limit the BAR size to what is available */
1152         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1153                         rbar_size);
1154
1155         /* Disable memory decoding while we change the BAR addresses and size */
1156         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1157         pci_write_config_word(adev->pdev, PCI_COMMAND,
1158                               cmd & ~PCI_COMMAND_MEMORY);
1159
1160         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1161         amdgpu_device_doorbell_fini(adev);
1162         if (adev->asic_type >= CHIP_BONAIRE)
1163                 pci_release_resource(adev->pdev, 2);
1164
1165         pci_release_resource(adev->pdev, 0);
1166
1167         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1168         if (r == -ENOSPC)
1169                 DRM_INFO("Not enough PCI address space for a large BAR.");
1170         else if (r && r != -ENOTSUPP)
1171                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1172
1173         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1174
1175         /* When the doorbell or fb BAR isn't available we have no chance of
1176          * using the device.
1177          */
1178         r = amdgpu_device_doorbell_init(adev);
1179         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1180                 return -ENODEV;
1181
1182         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1183
1184         return 0;
1185 }
1186
1187 /*
1188  * GPU helper functions.
1189  */
1190 /**
1191  * amdgpu_device_need_post - check if the hw need post or not
1192  *
1193  * @adev: amdgpu_device pointer
1194  *
1195  * Check if the asic has been initialized (all asics) at driver startup
1196  * or if post is needed because a hw reset was performed.
1197  * Returns true if need or false if not.
1198  */
1199 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1200 {
1201         uint32_t reg;
1202
1203         if (amdgpu_sriov_vf(adev))
1204                 return false;
1205
1206         if (amdgpu_passthrough(adev)) {
1207                 /* for FIJI: In the whole-GPU pass-through virtualization case, after VM reboot
1208                  * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU
1209                  * hangs. SMC firmware versions above 22.15 don't have this flaw, so we force
1210                  * vPost to be executed for SMC versions below 22.15
1211                  */
1212                 if (adev->asic_type == CHIP_FIJI) {
1213                         int err;
1214                         uint32_t fw_ver;
1215                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1216                         /* force vPost if an error occurred */
1217                         if (err)
1218                                 return true;
1219
1220                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1221                         if (fw_ver < 0x00160e00)
1222                                 return true;
1223                 }
1224         }
1225
1226         if (adev->has_hw_reset) {
1227                 adev->has_hw_reset = false;
1228                 return true;
1229         }
1230
1231         /* bios scratch used on CIK+ */
1232         if (adev->asic_type >= CHIP_BONAIRE)
1233                 return amdgpu_atombios_scratch_need_asic_init(adev);
1234
1235         /* check MEM_SIZE for older asics */
1236         reg = amdgpu_asic_get_config_memsize(adev);
1237
1238         if ((reg != 0) && (reg != 0xffffffff))
1239                 return false;
1240
1241         return true;
1242 }
1243
1244 /* if we get transitioned to only one device, take VGA back */
1245 /**
1246  * amdgpu_device_vga_set_decode - enable/disable vga decode
1247  *
1248  * @cookie: amdgpu_device pointer
1249  * @state: enable/disable vga decode
1250  *
1251  * Enable/disable vga decode (all asics).
1252  * Returns VGA resource flags.
1253  */
1254 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1255 {
1256         struct amdgpu_device *adev = cookie;
1257         amdgpu_asic_set_vga_state(adev, state);
1258         if (state)
1259                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1260                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1261         else
1262                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1263 }
1264
1265 /**
1266  * amdgpu_device_check_block_size - validate the vm block size
1267  *
1268  * @adev: amdgpu_device pointer
1269  *
1270  * Validates the vm block size specified via module parameter.
1271  * The vm block size defines number of bits in page table versus page directory,
1272  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1273  * page table and the remaining bits are in the page directory.
1274  */
1275 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1276 {
1277         /* defines number of bits in page table versus page directory,
1278          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1279          * page table and the remaining bits are in the page directory */
1280         if (amdgpu_vm_block_size == -1)
1281                 return;
1282
1283         if (amdgpu_vm_block_size < 9) {
1284                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1285                          amdgpu_vm_block_size);
1286                 amdgpu_vm_block_size = -1;
1287         }
1288 }
1289
1290 /**
1291  * amdgpu_device_check_vm_size - validate the vm size
1292  *
1293  * @adev: amdgpu_device pointer
1294  *
1295  * Validates the vm size in GB specified via module parameter.
1296  * The VM size is the size of the GPU virtual memory space in GB.
1297  */
1298 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1299 {
1300         /* no need to check the default value */
1301         if (amdgpu_vm_size == -1)
1302                 return;
1303
1304         if (amdgpu_vm_size < 1) {
1305                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1306                          amdgpu_vm_size);
1307                 amdgpu_vm_size = -1;
1308         }
1309 }
1310
1311 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1312 {
1313         struct sysinfo si;
1314         bool is_os_64 = (sizeof(void *) == 8);
1315         uint64_t total_memory;
1316         uint64_t dram_size_seven_GB = 0x1B8000000;
1317         uint64_t dram_size_three_GB = 0xB8000000;
1318
1319         if (amdgpu_smu_memory_pool_size == 0)
1320                 return;
1321
1322         if (!is_os_64) {
1323                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1324                 goto def_value;
1325         }
1326         si_meminfo(&si);
1327         total_memory = (uint64_t)si.totalram * si.mem_unit;
1328
1329         if ((amdgpu_smu_memory_pool_size == 1) ||
1330                 (amdgpu_smu_memory_pool_size == 2)) {
1331                 if (total_memory < dram_size_three_GB)
1332                         goto def_value1;
1333         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1334                 (amdgpu_smu_memory_pool_size == 8)) {
1335                 if (total_memory < dram_size_seven_GB)
1336                         goto def_value1;
1337         } else {
1338                 DRM_WARN("Smu memory pool size not supported\n");
1339                 goto def_value;
1340         }
1341         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1342
1343         return;
1344
1345 def_value1:
1346         DRM_WARN("Not enough system memory\n");
1347 def_value:
1348         adev->pm.smu_prv_buffer_size = 0;
1349 }
1350
1351 /**
1352  * amdgpu_device_check_arguments - validate module params
1353  *
1354  * @adev: amdgpu_device pointer
1355  *
1356  * Validates certain module parameters and updates
1357  * the associated values used by the driver (all asics).
1358  */
1359 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1360 {
1361         if (amdgpu_sched_jobs < 4) {
1362                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1363                          amdgpu_sched_jobs);
1364                 amdgpu_sched_jobs = 4;
1365         } else if (!is_power_of_2(amdgpu_sched_jobs)){
1366                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1367                          amdgpu_sched_jobs);
1368                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1369         }
1370
1371         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1372                 /* gart size must be greater or equal to 32M */
1373                 dev_warn(adev->dev, "gart size (%d) too small\n",
1374                          amdgpu_gart_size);
1375                 amdgpu_gart_size = -1;
1376         }
1377
1378         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1379                 /* gtt size must be greater or equal to 32M */
1380                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1381                                  amdgpu_gtt_size);
1382                 amdgpu_gtt_size = -1;
1383         }
1384
1385         /* valid range is between 4 and 9 inclusive */
1386         if (amdgpu_vm_fragment_size != -1 &&
1387             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1388                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1389                 amdgpu_vm_fragment_size = -1;
1390         }
1391
1392         if (amdgpu_sched_hw_submission < 2) {
1393                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1394                          amdgpu_sched_hw_submission);
1395                 amdgpu_sched_hw_submission = 2;
1396         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1397                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1398                          amdgpu_sched_hw_submission);
1399                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1400         }
1401
1402         amdgpu_device_check_smu_prv_buffer_size(adev);
1403
1404         amdgpu_device_check_vm_size(adev);
1405
1406         amdgpu_device_check_block_size(adev);
1407
1408         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1409
1410         amdgpu_gmc_tmz_set(adev);
1411
1412         amdgpu_gmc_noretry_set(adev);
1413
1414         return 0;
1415 }
1416
1417 /**
1418  * amdgpu_switcheroo_set_state - set switcheroo state
1419  *
1420  * @pdev: pci dev pointer
1421  * @state: vga_switcheroo state
1422  *
1423  * Callback for the switcheroo driver.  Suspends or resumes
1424  * the asics before or after it is powered up using ACPI methods.
1425  */
1426 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1427                                         enum vga_switcheroo_state state)
1428 {
1429         struct drm_device *dev = pci_get_drvdata(pdev);
1430         int r;
1431
1432         if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
1433                 return;
1434
1435         if (state == VGA_SWITCHEROO_ON) {
1436                 pr_info("switched on\n");
1437                 /* don't suspend or resume card normally */
1438                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1439
1440                 pci_set_power_state(pdev, PCI_D0);
1441                 amdgpu_device_load_pci_state(pdev);
1442                 r = pci_enable_device(pdev);
1443                 if (r)
1444                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1445                 amdgpu_device_resume(dev, true);
1446
1447                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1448                 drm_kms_helper_poll_enable(dev);
1449         } else {
1450                 pr_info("switched off\n");
1451                 drm_kms_helper_poll_disable(dev);
1452                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1453                 amdgpu_device_suspend(dev, true);
1454                 amdgpu_device_cache_pci_state(pdev);
1455                 /* Shut down the device */
1456                 pci_disable_device(pdev);
1457                 pci_set_power_state(pdev, PCI_D3cold);
1458                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1459         }
1460 }
1461
1462 /**
1463  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1464  *
1465  * @pdev: pci dev pointer
1466  *
1467  * Callback for the switcheroo driver.  Check if the switcheroo
1468  * state can be changed.
1469  * Returns true if the state can be changed, false if not.
1470  */
1471 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1472 {
1473         struct drm_device *dev = pci_get_drvdata(pdev);
1474
1475         /*
1476         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1477         * locking inversion with the driver load path. And the access here is
1478         * completely racy anyway. So don't bother with locking for now.
1479         */
1480         return atomic_read(&dev->open_count) == 0;
1481 }
1482
1483 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1484         .set_gpu_state = amdgpu_switcheroo_set_state,
1485         .reprobe = NULL,
1486         .can_switch = amdgpu_switcheroo_can_switch,
1487 };
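
/*
 * This ops table is handed to vga_switcheroo elsewhere during device init,
 * roughly as follows (sketch; the px flag reflects ATPX support):
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 */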
1488
1489 /**
1490  * amdgpu_device_ip_set_clockgating_state - set the CG state
1491  *
1492  * @dev: amdgpu_device pointer
1493  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1494  * @state: clockgating state (gate or ungate)
1495  *
1496  * Sets the requested clockgating state for all instances of
1497  * the hardware IP specified.
1498  * Returns the error code from the last instance.
1499  */
1500 int amdgpu_device_ip_set_clockgating_state(void *dev,
1501                                            enum amd_ip_block_type block_type,
1502                                            enum amd_clockgating_state state)
1503 {
1504         struct amdgpu_device *adev = dev;
1505         int i, r = 0;
1506
1507         for (i = 0; i < adev->num_ip_blocks; i++) {
1508                 if (!adev->ip_blocks[i].status.valid)
1509                         continue;
1510                 if (adev->ip_blocks[i].version->type != block_type)
1511                         continue;
1512                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1513                         continue;
1514                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1515                         (void *)adev, state);
1516                 if (r)
1517                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1518                                   adev->ip_blocks[i].version->funcs->name, r);
1519         }
1520         return r;
1521 }
1522
1523 /**
1524  * amdgpu_device_ip_set_powergating_state - set the PG state
1525  *
1526  * @dev: amdgpu_device pointer
1527  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1528  * @state: powergating state (gate or ungate)
1529  *
1530  * Sets the requested powergating state for all instances of
1531  * the hardware IP specified.
1532  * Returns the error code from the last instance.
1533  */
1534 int amdgpu_device_ip_set_powergating_state(void *dev,
1535                                            enum amd_ip_block_type block_type,
1536                                            enum amd_powergating_state state)
1537 {
1538         struct amdgpu_device *adev = dev;
1539         int i, r = 0;
1540
1541         for (i = 0; i < adev->num_ip_blocks; i++) {
1542                 if (!adev->ip_blocks[i].status.valid)
1543                         continue;
1544                 if (adev->ip_blocks[i].version->type != block_type)
1545                         continue;
1546                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1547                         continue;
1548                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1549                         (void *)adev, state);
1550                 if (r)
1551                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1552                                   adev->ip_blocks[i].version->funcs->name, r);
1553         }
1554         return r;
1555 }
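
/*
 * Callers pass the IP block type and the target gating state, e.g.
 * (illustrative):
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 *					       AMD_PG_STATE_UNGATE);
 */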
1556
1557 /**
1558  * amdgpu_device_ip_get_clockgating_state - get the CG state
1559  *
1560  * @adev: amdgpu_device pointer
1561  * @flags: clockgating feature flags
1562  *
1563  * Walks the list of IPs on the device and updates the clockgating
1564  * flags for each IP.
1565  * Updates @flags with the feature flags for each hardware IP where
1566  * clockgating is enabled.
1567  */
1568 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1569                                             u32 *flags)
1570 {
1571         int i;
1572
1573         for (i = 0; i < adev->num_ip_blocks; i++) {
1574                 if (!adev->ip_blocks[i].status.valid)
1575                         continue;
1576                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1577                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1578         }
1579 }
1580
1581 /**
1582  * amdgpu_device_ip_wait_for_idle - wait for idle
1583  *
1584  * @adev: amdgpu_device pointer
1585  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1586  *
1587  * Waits for the requested hardware IP to be idle.
1588  * Returns 0 for success or a negative error code on failure.
1589  */
1590 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1591                                    enum amd_ip_block_type block_type)
1592 {
1593         int i, r;
1594
1595         for (i = 0; i < adev->num_ip_blocks; i++) {
1596                 if (!adev->ip_blocks[i].status.valid)
1597                         continue;
1598                 if (adev->ip_blocks[i].version->type == block_type) {
1599                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1600                         if (r)
1601                                 return r;
1602                         break;
1603                 }
1604         }
1605         return 0;
1606
1607 }
1608
1609 /**
1610  * amdgpu_device_ip_is_idle - is the hardware IP idle
1611  *
1612  * @adev: amdgpu_device pointer
1613  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1614  *
1615  * Check if the hardware IP is idle or not.
1616  * Returns true if the IP is idle, false if not.
1617  */
1618 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1619                               enum amd_ip_block_type block_type)
1620 {
1621         int i;
1622
1623         for (i = 0; i < adev->num_ip_blocks; i++) {
1624                 if (!adev->ip_blocks[i].status.valid)
1625                         continue;
1626                 if (adev->ip_blocks[i].version->type == block_type)
1627                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1628         }
1629         return true;
1630
1631 }
1632
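/*
 * Usage sketch (illustrative): a caller that needs the GFX block quiescent
 * before reprogramming it could combine the two helpers above:
 *
 *   if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX)) {
 *           r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 *           if (r)
 *                   return r;
 *   }
 */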
1633 /**
1634  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1635  *
1636  * @adev: amdgpu_device pointer
1637  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1638  *
1639  * Returns a pointer to the hardware IP block structure
1640  * if it exists for the asic, otherwise NULL.
1641  */
1642 struct amdgpu_ip_block *
1643 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1644                               enum amd_ip_block_type type)
1645 {
1646         int i;
1647
1648         for (i = 0; i < adev->num_ip_blocks; i++)
1649                 if (adev->ip_blocks[i].version->type == type)
1650                         return &adev->ip_blocks[i];
1651
1652         return NULL;
1653 }
1654
1655 /**
1656  * amdgpu_device_ip_block_version_cmp
1657  *
1658  * @adev: amdgpu_device pointer
1659  * @type: enum amd_ip_block_type
1660  * @major: major version
1661  * @minor: minor version
1662  *
1663  * return 0 if the block's version is equal to or greater than major.minor
1664  * return 1 if it is smaller or the ip_block doesn't exist
1665  */
1666 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1667                                        enum amd_ip_block_type type,
1668                                        u32 major, u32 minor)
1669 {
1670         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1671
1672         if (ip_block && ((ip_block->version->major > major) ||
1673                         ((ip_block->version->major == major) &&
1674                         (ip_block->version->minor >= minor))))
1675                 return 0;
1676
1677         return 1;
1678 }
1679
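/*
 * Usage sketch (illustrative): since the helper above returns 0 when the
 * installed IP block meets the requested major.minor, a minimum-version
 * check reads:
 *
 *   if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 9, 0))
 *           DRM_INFO("GFX 9.0 or newer is present\n");
 */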
1680 /**
1681  * amdgpu_device_ip_block_add
1682  *
1683  * @adev: amdgpu_device pointer
1684  * @ip_block_version: pointer to the IP to add
1685  *
1686  * Adds the IP block driver information to the collection of IPs
1687  * on the asic.
1688  */
1689 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1690                                const struct amdgpu_ip_block_version *ip_block_version)
1691 {
1692         if (!ip_block_version)
1693                 return -EINVAL;
1694
1695         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1696                   ip_block_version->funcs->name);
1697
1698         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1699
1700         return 0;
1701 }
1702
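/*
 * Usage sketch (illustrative): the per-ASIC <asic>_set_ip_blocks() helpers
 * call this in bring-up order, for example (assuming the Vega10 GFX block,
 * gfx_v9_0_ip_block, as the example):
 *
 *   r = amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 */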
1703 /**
1704  * amdgpu_device_enable_virtual_display - enable virtual display feature
1705  *
1706  * @adev: amdgpu_device pointer
1707  *
1708  * Enables the virtual display feature if the user has enabled it via
1709  * the module parameter virtual_display.  This feature provides virtual
1710  * display hardware on headless boards or in virtualized environments.
1711  * This function parses and validates the configuration string specified by
1712  * the user and configures the virtual display settings (number of
1713  * virtual connectors, crtcs, etc.) specified.
1714  */
1715 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1716 {
1717         adev->enable_virtual_display = false;
1718
1719         if (amdgpu_virtual_display) {
1720                 const char *pci_address_name = pci_name(adev->pdev);
1721                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1722
1723                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1724                 pciaddstr_tmp = pciaddstr;
1725                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1726                         pciaddname = strsep(&pciaddname_tmp, ",");
1727                         if (!strcmp("all", pciaddname)
1728                             || !strcmp(pci_address_name, pciaddname)) {
1729                                 long num_crtc;
1730                                 int res = -1;
1731
1732                                 adev->enable_virtual_display = true;
1733
1734                                 if (pciaddname_tmp)
1735                                         res = kstrtol(pciaddname_tmp, 10,
1736                                                       &num_crtc);
1737
1738                                 if (!res) {
1739                                         if (num_crtc < 1)
1740                                                 num_crtc = 1;
1741                                         if (num_crtc > 6)
1742                                                 num_crtc = 6;
1743                                         adev->mode_info.num_crtc = num_crtc;
1744                                 } else {
1745                                         adev->mode_info.num_crtc = 1;
1746                                 }
1747                                 break;
1748                         }
1749                 }
1750
1751                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1752                          amdgpu_virtual_display, pci_address_name,
1753                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1754
1755                 kfree(pciaddstr);
1756         }
1757 }
1758
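/*
 * Example (illustrative): the virtual_display string parsed above has the
 * form "<pci address>,<num crtcs>;...", or "all" to match every device.
 * Loading the driver with
 *
 *   modprobe amdgpu virtual_display=0000:01:00.0,2
 *
 * enables two virtual crtcs on the GPU at 0000:01:00.0 (the address is only
 * an example).
 */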
1759 /**
1760  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1761  *
1762  * @adev: amdgpu_device pointer
1763  *
1764  * Parses the asic configuration parameters specified in the gpu info
1765  * firmware and makes them available to the driver for use in configuring
1766  * the asic.
1767  * Returns 0 on success, -EINVAL on failure.
1768  */
1769 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1770 {
1771         const char *chip_name;
1772         char fw_name[40];
1773         int err;
1774         const struct gpu_info_firmware_header_v1_0 *hdr;
1775
1776         adev->firmware.gpu_info_fw = NULL;
1777
1778         if (adev->mman.discovery_bin) {
1779                 amdgpu_discovery_get_gfx_info(adev);
1780
1781                 /*
1782                  * FIXME: The bounding box is still needed by Navi12, so
1783                  * temporarily read it from gpu_info firmware. Should be dropped
1784                  * when DAL no longer needs it.
1785                  */
1786                 if (adev->asic_type != CHIP_NAVI12)
1787                         return 0;
1788         }
1789
1790         switch (adev->asic_type) {
1791 #ifdef CONFIG_DRM_AMDGPU_SI
1792         case CHIP_VERDE:
1793         case CHIP_TAHITI:
1794         case CHIP_PITCAIRN:
1795         case CHIP_OLAND:
1796         case CHIP_HAINAN:
1797 #endif
1798 #ifdef CONFIG_DRM_AMDGPU_CIK
1799         case CHIP_BONAIRE:
1800         case CHIP_HAWAII:
1801         case CHIP_KAVERI:
1802         case CHIP_KABINI:
1803         case CHIP_MULLINS:
1804 #endif
1805         case CHIP_TOPAZ:
1806         case CHIP_TONGA:
1807         case CHIP_FIJI:
1808         case CHIP_POLARIS10:
1809         case CHIP_POLARIS11:
1810         case CHIP_POLARIS12:
1811         case CHIP_VEGAM:
1812         case CHIP_CARRIZO:
1813         case CHIP_STONEY:
1814         case CHIP_VEGA20:
1815         case CHIP_SIENNA_CICHLID:
1816         case CHIP_NAVY_FLOUNDER:
1817         case CHIP_DIMGREY_CAVEFISH:
1818         default:
1819                 return 0;
1820         case CHIP_VEGA10:
1821                 chip_name = "vega10";
1822                 break;
1823         case CHIP_VEGA12:
1824                 chip_name = "vega12";
1825                 break;
1826         case CHIP_RAVEN:
1827                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1828                         chip_name = "raven2";
1829                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1830                         chip_name = "picasso";
1831                 else
1832                         chip_name = "raven";
1833                 break;
1834         case CHIP_ARCTURUS:
1835                 chip_name = "arcturus";
1836                 break;
1837         case CHIP_RENOIR:
1838                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1839                         chip_name = "renoir";
1840                 else
1841                         chip_name = "green_sardine";
1842                 break;
1843         case CHIP_NAVI10:
1844                 chip_name = "navi10";
1845                 break;
1846         case CHIP_NAVI14:
1847                 chip_name = "navi14";
1848                 break;
1849         case CHIP_NAVI12:
1850                 chip_name = "navi12";
1851                 break;
1852         case CHIP_VANGOGH:
1853                 chip_name = "vangogh";
1854                 break;
1855         }
1856
1857         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1858         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1859         if (err) {
1860                 dev_err(adev->dev,
1861                         "Failed to load gpu_info firmware \"%s\"\n",
1862                         fw_name);
1863                 goto out;
1864         }
1865         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1866         if (err) {
1867                 dev_err(adev->dev,
1868                         "Failed to validate gpu_info firmware \"%s\"\n",
1869                         fw_name);
1870                 goto out;
1871         }
1872
1873         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1874         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1875
1876         switch (hdr->version_major) {
1877         case 1:
1878         {
1879                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1880                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1881                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1882
1883                 /*
1884                  * Should be dropped when DAL no longer needs it.
1885                  */
1886                 if (adev->asic_type == CHIP_NAVI12)
1887                         goto parse_soc_bounding_box;
1888
1889                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1890                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1891                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1892                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1893                 adev->gfx.config.max_texture_channel_caches =
1894                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1895                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1896                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1897                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1898                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1899                 adev->gfx.config.double_offchip_lds_buf =
1900                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1901                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1902                 adev->gfx.cu_info.max_waves_per_simd =
1903                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1904                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1905                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1906                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1907                 if (hdr->version_minor >= 1) {
1908                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1909                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1910                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1911                         adev->gfx.config.num_sc_per_sh =
1912                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1913                         adev->gfx.config.num_packer_per_sc =
1914                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1915                 }
1916
1917 parse_soc_bounding_box:
1918                 /*
1919                  * soc bounding box info is not integrated into the discovery table,
1920                  * so we always need to parse it from the gpu_info firmware when needed.
1921                  */
1922                 if (hdr->version_minor == 2) {
1923                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1924                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1925                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1926                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1927                 }
1928                 break;
1929         }
1930         default:
1931                 dev_err(adev->dev,
1932                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1933                 err = -EINVAL;
1934                 goto out;
1935         }
1936 out:
1937         return err;
1938 }
1939
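/*
 * Example (illustrative): for CHIP_VEGA10 the snprintf above produces
 * "amdgpu/vega10_gpu_info.bin", which request_firmware() then loads from the
 * standard firmware search path and amdgpu_ucode_validate() checks.
 */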
1940 /**
1941  * amdgpu_device_ip_early_init - run early init for hardware IPs
1942  *
1943  * @adev: amdgpu_device pointer
1944  *
1945  * Early initialization pass for hardware IPs.  The hardware IPs that make
1946  * up each asic are discovered and each IP's early_init callback is run.  This
1947  * is the first stage in initializing the asic.
1948  * Returns 0 on success, negative error code on failure.
1949  */
1950 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1951 {
1952         int i, r;
1953
1954         amdgpu_device_enable_virtual_display(adev);
1955
1956         if (amdgpu_sriov_vf(adev)) {
1957                 r = amdgpu_virt_request_full_gpu(adev, true);
1958                 if (r)
1959                         return r;
1960         }
1961
1962         switch (adev->asic_type) {
1963 #ifdef CONFIG_DRM_AMDGPU_SI
1964         case CHIP_VERDE:
1965         case CHIP_TAHITI:
1966         case CHIP_PITCAIRN:
1967         case CHIP_OLAND:
1968         case CHIP_HAINAN:
1969                 adev->family = AMDGPU_FAMILY_SI;
1970                 r = si_set_ip_blocks(adev);
1971                 if (r)
1972                         return r;
1973                 break;
1974 #endif
1975 #ifdef CONFIG_DRM_AMDGPU_CIK
1976         case CHIP_BONAIRE:
1977         case CHIP_HAWAII:
1978         case CHIP_KAVERI:
1979         case CHIP_KABINI:
1980         case CHIP_MULLINS:
1981                 if (adev->flags & AMD_IS_APU)
1982                         adev->family = AMDGPU_FAMILY_KV;
1983                 else
1984                         adev->family = AMDGPU_FAMILY_CI;
1985
1986                 r = cik_set_ip_blocks(adev);
1987                 if (r)
1988                         return r;
1989                 break;
1990 #endif
1991         case CHIP_TOPAZ:
1992         case CHIP_TONGA:
1993         case CHIP_FIJI:
1994         case CHIP_POLARIS10:
1995         case CHIP_POLARIS11:
1996         case CHIP_POLARIS12:
1997         case CHIP_VEGAM:
1998         case CHIP_CARRIZO:
1999         case CHIP_STONEY:
2000                 if (adev->flags & AMD_IS_APU)
2001                         adev->family = AMDGPU_FAMILY_CZ;
2002                 else
2003                         adev->family = AMDGPU_FAMILY_VI;
2004
2005                 r = vi_set_ip_blocks(adev);
2006                 if (r)
2007                         return r;
2008                 break;
2009         case CHIP_VEGA10:
2010         case CHIP_VEGA12:
2011         case CHIP_VEGA20:
2012         case CHIP_RAVEN:
2013         case CHIP_ARCTURUS:
2014         case CHIP_RENOIR:
2015                 if (adev->flags & AMD_IS_APU)
2016                         adev->family = AMDGPU_FAMILY_RV;
2017                 else
2018                         adev->family = AMDGPU_FAMILY_AI;
2019
2020                 r = soc15_set_ip_blocks(adev);
2021                 if (r)
2022                         return r;
2023                 break;
2024         case  CHIP_NAVI10:
2025         case  CHIP_NAVI14:
2026         case  CHIP_NAVI12:
2027         case  CHIP_SIENNA_CICHLID:
2028         case  CHIP_NAVY_FLOUNDER:
2029         case  CHIP_DIMGREY_CAVEFISH:
2030         case CHIP_VANGOGH:
2031                 if (adev->asic_type == CHIP_VANGOGH)
2032                         adev->family = AMDGPU_FAMILY_VGH;
2033                 else
2034                         adev->family = AMDGPU_FAMILY_NV;
2035
2036                 r = nv_set_ip_blocks(adev);
2037                 if (r)
2038                         return r;
2039                 break;
2040         default:
2041                 /* FIXME: not supported yet */
2042                 return -EINVAL;
2043         }
2044
2045         amdgpu_amdkfd_device_probe(adev);
2046
2047         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2048         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2049                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2050
2051         for (i = 0; i < adev->num_ip_blocks; i++) {
2052                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2053                         DRM_ERROR("disabled ip block: %d <%s>\n",
2054                                   i, adev->ip_blocks[i].version->funcs->name);
2055                         adev->ip_blocks[i].status.valid = false;
2056                 } else {
2057                         if (adev->ip_blocks[i].version->funcs->early_init) {
2058                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2059                                 if (r == -ENOENT) {
2060                                         adev->ip_blocks[i].status.valid = false;
2061                                 } else if (r) {
2062                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2063                                                   adev->ip_blocks[i].version->funcs->name, r);
2064                                         return r;
2065                                 } else {
2066                                         adev->ip_blocks[i].status.valid = true;
2067                                 }
2068                         } else {
2069                                 adev->ip_blocks[i].status.valid = true;
2070                         }
2071                 }
2072                 /* get the vbios after the asic_funcs are set up */
2073                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2074                         r = amdgpu_device_parse_gpu_info_fw(adev);
2075                         if (r)
2076                                 return r;
2077
2078                         /* Read BIOS */
2079                         if (!amdgpu_get_bios(adev))
2080                                 return -EINVAL;
2081
2082                         r = amdgpu_atombios_init(adev);
2083                         if (r) {
2084                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2085                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2086                                 return r;
2087                         }
2088                 }
2089         }
2090
2091         adev->cg_flags &= amdgpu_cg_mask;
2092         adev->pg_flags &= amdgpu_pg_mask;
2093
2094         return 0;
2095 }
2096
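/*
 * Example (illustrative): the amdgpu_ip_block_mask test in early init above
 * lets individual blocks be disabled from the kernel command line by clearing
 * their bit, e.g. booting with
 *
 *   amdgpu.ip_block_mask=0xfffffffd
 *
 * marks IP block 1 invalid (which block that is depends on the ASIC's
 * set_ip_blocks() ordering).
 */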
2097 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2098 {
2099         int i, r;
2100
2101         for (i = 0; i < adev->num_ip_blocks; i++) {
2102                 if (!adev->ip_blocks[i].status.sw)
2103                         continue;
2104                 if (adev->ip_blocks[i].status.hw)
2105                         continue;
2106                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2107                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2108                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2109                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2110                         if (r) {
2111                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2112                                           adev->ip_blocks[i].version->funcs->name, r);
2113                                 return r;
2114                         }
2115                         adev->ip_blocks[i].status.hw = true;
2116                 }
2117         }
2118
2119         return 0;
2120 }
2121
2122 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2123 {
2124         int i, r;
2125
2126         for (i = 0; i < adev->num_ip_blocks; i++) {
2127                 if (!adev->ip_blocks[i].status.sw)
2128                         continue;
2129                 if (adev->ip_blocks[i].status.hw)
2130                         continue;
2131                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2132                 if (r) {
2133                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2134                                   adev->ip_blocks[i].version->funcs->name, r);
2135                         return r;
2136                 }
2137                 adev->ip_blocks[i].status.hw = true;
2138         }
2139
2140         return 0;
2141 }
2142
2143 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2144 {
2145         int r = 0;
2146         int i;
2147         uint32_t smu_version;
2148
2149         if (adev->asic_type >= CHIP_VEGA10) {
2150                 for (i = 0; i < adev->num_ip_blocks; i++) {
2151                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2152                                 continue;
2153
2154                         /* no need to do the fw loading again if already done */
2155                         if (adev->ip_blocks[i].status.hw == true)
2156                                 break;
2157
2158                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2159                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2160                                 if (r) {
2161                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2162                                                           adev->ip_blocks[i].version->funcs->name, r);
2163                                         return r;
2164                                 }
2165                         } else {
2166                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2167                                 if (r) {
2168                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2169                                                           adev->ip_blocks[i].version->funcs->name, r);
2170                                         return r;
2171                                 }
2172                         }
2173
2174                         adev->ip_blocks[i].status.hw = true;
2175                         break;
2176                 }
2177         }
2178
2179         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2180                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2181
2182         return r;
2183 }
2184
2185 /**
2186  * amdgpu_device_ip_init - run init for hardware IPs
2187  *
2188  * @adev: amdgpu_device pointer
2189  *
2190  * Main initialization pass for hardware IPs.  The list of all the hardware
2191  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2192  * are run.  sw_init initializes the software state associated with each IP
2193  * and hw_init initializes the hardware associated with each IP.
2194  * Returns 0 on success, negative error code on failure.
2195  */
2196 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2197 {
2198         int i, r;
2199
2200         r = amdgpu_ras_init(adev);
2201         if (r)
2202                 return r;
2203
2204         for (i = 0; i < adev->num_ip_blocks; i++) {
2205                 if (!adev->ip_blocks[i].status.valid)
2206                         continue;
2207                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2208                 if (r) {
2209                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2210                                   adev->ip_blocks[i].version->funcs->name, r);
2211                         goto init_failed;
2212                 }
2213                 adev->ip_blocks[i].status.sw = true;
2214
2215                 /* need to do gmc hw init early so we can allocate gpu mem */
2216                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2217                         r = amdgpu_device_vram_scratch_init(adev);
2218                         if (r) {
2219                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2220                                 goto init_failed;
2221                         }
2222                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2223                         if (r) {
2224                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2225                                 goto init_failed;
2226                         }
2227                         r = amdgpu_device_wb_init(adev);
2228                         if (r) {
2229                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2230                                 goto init_failed;
2231                         }
2232                         adev->ip_blocks[i].status.hw = true;
2233
2234                         /* right after GMC hw init, we create CSA */
2235                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2236                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2237                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2238                                                                 AMDGPU_CSA_SIZE);
2239                                 if (r) {
2240                                         DRM_ERROR("allocate CSA failed %d\n", r);
2241                                         goto init_failed;
2242                                 }
2243                         }
2244                 }
2245         }
2246
2247         if (amdgpu_sriov_vf(adev))
2248                 amdgpu_virt_init_data_exchange(adev);
2249
2250         r = amdgpu_ib_pool_init(adev);
2251         if (r) {
2252                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2253                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2254                 goto init_failed;
2255         }
2256
2257         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2258         if (r)
2259                 goto init_failed;
2260
2261         r = amdgpu_device_ip_hw_init_phase1(adev);
2262         if (r)
2263                 goto init_failed;
2264
2265         r = amdgpu_device_fw_loading(adev);
2266         if (r)
2267                 goto init_failed;
2268
2269         r = amdgpu_device_ip_hw_init_phase2(adev);
2270         if (r)
2271                 goto init_failed;
2272
2273         /*
2274          * Retired pages will be loaded from eeprom and reserved here;
2275          * this must be called after amdgpu_device_ip_hw_init_phase2 since
2276          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2277          * functional for I2C communication, which is only true at this point.
2278          *
2279          * amdgpu_ras_recovery_init may fail, but the upper level only cares
2280          * about failures caused by a bad gpu state and stops the amdgpu init
2281          * process accordingly.  For other failures it still releases all the
2282          * resources and prints an error message rather than returning a
2283          * negative value to the upper level.
2284          *
2285          * Note: in theory this should be called before any vram allocations
2286          * to keep retired pages from being allocated and used again.
2287          */
2288         r = amdgpu_ras_recovery_init(adev);
2289         if (r)
2290                 goto init_failed;
2291
2292         if (adev->gmc.xgmi.num_physical_nodes > 1)
2293                 amdgpu_xgmi_add_device(adev);
2294         amdgpu_amdkfd_device_init(adev);
2295
2296         amdgpu_fru_get_product_info(adev);
2297
2298 init_failed:
2299         if (amdgpu_sriov_vf(adev))
2300                 amdgpu_virt_release_full_gpu(adev, true);
2301
2302         return r;
2303 }
2304
2305 /**
2306  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2307  *
2308  * @adev: amdgpu_device pointer
2309  *
2310  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2311  * this function before a GPU reset.  If the value is retained after a
2312  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2313  */
2314 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2315 {
2316         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2317 }
2318
2319 /**
2320  * amdgpu_device_check_vram_lost - check if vram is valid
2321  *
2322  * @adev: amdgpu_device pointer
2323  *
2324  * Checks the reset magic value written to the gart pointer in VRAM.
2325  * The driver calls this after a GPU reset to see if the contents of
2326  * VRAM have been lost or not.
2327  * returns true if vram is lost, false if not.
2328  */
2329 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2330 {
2331         if (memcmp(adev->gart.ptr, adev->reset_magic,
2332                         AMDGPU_RESET_MAGIC_NUM))
2333                 return true;
2334
2335         if (!amdgpu_in_reset(adev))
2336                 return false;
2337
2338         /*
2339          * For all ASICs with baco/mode1 reset, the VRAM is
2340          * always assumed to be lost.
2341          */
2342         switch (amdgpu_asic_reset_method(adev)) {
2343         case AMD_RESET_METHOD_BACO:
2344         case AMD_RESET_METHOD_MODE1:
2345                 return true;
2346         default:
2347                 return false;
2348         }
2349 }
2350
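/*
 * Illustrative pattern (sketch): the two helpers above are used as a pair
 * around resets, roughly:
 *
 *   amdgpu_device_fill_reset_magic(adev);             (done in late init)
 *   ... ASIC reset ...
 *   vram_lost = amdgpu_device_check_vram_lost(adev);
 *
 * so buffer contents only need to be restored when vram_lost is true.
 */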
2351 /**
2352  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2353  *
2354  * @adev: amdgpu_device pointer
2355  * @state: clockgating state (gate or ungate)
2356  *
2357  * The list of all the hardware IPs that make up the asic is walked and the
2358  * set_clockgating_state callbacks are run.
2359  * During the late init pass this enables clockgating for hardware IPs;
2360  * during fini or suspend it disables clockgating for them.
2361  * Returns 0 on success, negative error code on failure.
2362  */
2363
2364 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2365                                                 enum amd_clockgating_state state)
2366 {
2367         int i, j, r;
2368
2369         if (amdgpu_emu_mode == 1)
2370                 return 0;
2371
2372         for (j = 0; j < adev->num_ip_blocks; j++) {
2373                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2374                 if (!adev->ip_blocks[i].status.late_initialized)
2375                         continue;
2376                 /* skip CG for VCE/UVD, it's handled specially */
2377                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2378                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2379                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2380                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2381                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2382                         /* enable clockgating to save power */
2383                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2384                                                                                      state);
2385                         if (r) {
2386                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2387                                           adev->ip_blocks[i].version->funcs->name, r);
2388                                 return r;
2389                         }
2390                 }
2391         }
2392
2393         return 0;
2394 }
2395
2396 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2397 {
2398         int i, j, r;
2399
2400         if (amdgpu_emu_mode == 1)
2401                 return 0;
2402
2403         for (j = 0; j < adev->num_ip_blocks; j++) {
2404                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2405                 if (!adev->ip_blocks[i].status.late_initialized)
2406                         continue;
2407                 /* skip PG for VCE/UVD, it's handled specially */
2408                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2409                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2410                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2411                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2412                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2413                         /* enable powergating to save power */
2414                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2415                                                                                         state);
2416                         if (r) {
2417                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2418                                           adev->ip_blocks[i].version->funcs->name, r);
2419                                 return r;
2420                         }
2421                 }
2422         }
2423         return 0;
2424 }
2425
2426 static int amdgpu_device_enable_mgpu_fan_boost(void)
2427 {
2428         struct amdgpu_gpu_instance *gpu_ins;
2429         struct amdgpu_device *adev;
2430         int i, ret = 0;
2431
2432         mutex_lock(&mgpu_info.mutex);
2433
2434         /*
2435          * MGPU fan boost feature should be enabled
2436          * only when there are two or more dGPUs in
2437          * the system
2438          */
2439         if (mgpu_info.num_dgpu < 2)
2440                 goto out;
2441
2442         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2443                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2444                 adev = gpu_ins->adev;
2445                 if (!(adev->flags & AMD_IS_APU) &&
2446                     !gpu_ins->mgpu_fan_enabled) {
2447                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2448                         if (ret)
2449                                 break;
2450
2451                         gpu_ins->mgpu_fan_enabled = 1;
2452                 }
2453         }
2454
2455 out:
2456         mutex_unlock(&mgpu_info.mutex);
2457
2458         return ret;
2459 }
2460
2461 /**
2462  * amdgpu_device_ip_late_init - run late init for hardware IPs
2463  *
2464  * @adev: amdgpu_device pointer
2465  *
2466  * Late initialization pass for hardware IPs.  The list of all the hardware
2467  * IPs that make up the asic is walked and the late_init callbacks are run.
2468  * late_init covers any special initialization that an IP requires
2469  * after all of the other IPs have been initialized or something that needs to happen
2470  * late in the init process.
2471  * Returns 0 on success, negative error code on failure.
2472  */
2473 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2474 {
2475         struct amdgpu_gpu_instance *gpu_instance;
2476         int i = 0, r;
2477
2478         for (i = 0; i < adev->num_ip_blocks; i++) {
2479                 if (!adev->ip_blocks[i].status.hw)
2480                         continue;
2481                 if (adev->ip_blocks[i].version->funcs->late_init) {
2482                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2483                         if (r) {
2484                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2485                                           adev->ip_blocks[i].version->funcs->name, r);
2486                                 return r;
2487                         }
2488                 }
2489                 adev->ip_blocks[i].status.late_initialized = true;
2490         }
2491
2492         amdgpu_ras_set_error_query_ready(adev, true);
2493
2494         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2495         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2496
2497         amdgpu_device_fill_reset_magic(adev);
2498
2499         r = amdgpu_device_enable_mgpu_fan_boost();
2500         if (r)
2501                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2502
2503
2504         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2505                 mutex_lock(&mgpu_info.mutex);
2506
2507                 /*
2508                  * Reset the device p-state to low, as it was booted with the high p-state.
2509                  *
2510                  * This should be performed only after all devices from the same
2511                  * hive get initialized.
2512                  *
2513                  * However, the number of devices in a hive is not known in advance;
2514                  * it is counted one by one as devices are initialized.
2515                  *
2516                  * So, we wait for all XGMI interlinked devices to be initialized.
2517                  * This may bring some delays as those devices may come from
2518                  * different hives. But that should be OK.
2519                  */
2520                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2521                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2522                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2523                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2524                                         continue;
2525
2526                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2527                                                 AMDGPU_XGMI_PSTATE_MIN);
2528                                 if (r) {
2529                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2530                                         break;
2531                                 }
2532                         }
2533                 }
2534
2535                 mutex_unlock(&mgpu_info.mutex);
2536         }
2537
2538         return 0;
2539 }
2540
2541 /**
2542  * amdgpu_device_ip_fini - run fini for hardware IPs
2543  *
2544  * @adev: amdgpu_device pointer
2545  *
2546  * Main teardown pass for hardware IPs.  The list of all the hardware
2547  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2548  * are run.  hw_fini tears down the hardware associated with each IP
2549  * and sw_fini tears down any software state associated with each IP.
2550  * Returns 0 on success, negative error code on failure.
2551  */
2552 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2553 {
2554         int i, r;
2555
2556         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2557                 amdgpu_virt_release_ras_err_handler_data(adev);
2558
2559         amdgpu_ras_pre_fini(adev);
2560
2561         if (adev->gmc.xgmi.num_physical_nodes > 1)
2562                 amdgpu_xgmi_remove_device(adev);
2563
2564         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2565         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2566
2567         amdgpu_amdkfd_device_fini(adev);
2568
2569         /* need to disable SMC first */
2570         for (i = 0; i < adev->num_ip_blocks; i++) {
2571                 if (!adev->ip_blocks[i].status.hw)
2572                         continue;
2573                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2574                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2575                         /* XXX handle errors */
2576                         if (r) {
2577                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2578                                           adev->ip_blocks[i].version->funcs->name, r);
2579                         }
2580                         adev->ip_blocks[i].status.hw = false;
2581                         break;
2582                 }
2583         }
2584
2585         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2586                 if (!adev->ip_blocks[i].status.hw)
2587                         continue;
2588
2589                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2590                 /* XXX handle errors */
2591                 if (r) {
2592                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2593                                   adev->ip_blocks[i].version->funcs->name, r);
2594                 }
2595
2596                 adev->ip_blocks[i].status.hw = false;
2597         }
2598
2599
2600         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2601                 if (!adev->ip_blocks[i].status.sw)
2602                         continue;
2603
2604                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2605                         amdgpu_ucode_free_bo(adev);
2606                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2607                         amdgpu_device_wb_fini(adev);
2608                         amdgpu_device_vram_scratch_fini(adev);
2609                         amdgpu_ib_pool_fini(adev);
2610                 }
2611
2612                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2613                 /* XXX handle errors */
2614                 if (r) {
2615                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2616                                   adev->ip_blocks[i].version->funcs->name, r);
2617                 }
2618                 adev->ip_blocks[i].status.sw = false;
2619                 adev->ip_blocks[i].status.valid = false;
2620         }
2621
2622         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2623                 if (!adev->ip_blocks[i].status.late_initialized)
2624                         continue;
2625                 if (adev->ip_blocks[i].version->funcs->late_fini)
2626                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2627                 adev->ip_blocks[i].status.late_initialized = false;
2628         }
2629
2630         amdgpu_ras_fini(adev);
2631
2632         if (amdgpu_sriov_vf(adev))
2633                 if (amdgpu_virt_release_full_gpu(adev, false))
2634                         DRM_ERROR("failed to release exclusive mode on fini\n");
2635
2636         return 0;
2637 }
2638
2639 /**
2640  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2641  *
2642  * @work: work_struct.
2643  */
2644 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2645 {
2646         struct amdgpu_device *adev =
2647                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2648         int r;
2649
2650         r = amdgpu_ib_ring_tests(adev);
2651         if (r)
2652                 DRM_ERROR("ib ring test failed (%d).\n", r);
2653 }
2654
2655 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2656 {
2657         struct amdgpu_device *adev =
2658                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2659
2660         mutex_lock(&adev->gfx.gfx_off_mutex);
2661         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2662                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2663                         adev->gfx.gfx_off_state = true;
2664         }
2665         mutex_unlock(&adev->gfx.gfx_off_mutex);
2666 }
2667
2668 /**
2669  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2670  *
2671  * @adev: amdgpu_device pointer
2672  *
2673  * Main suspend function for hardware IPs.  The list of all the hardware
2674  * IPs that make up the asic is walked, clockgating is disabled and the
2675  * suspend callbacks are run.  suspend puts the hardware and software state
2676  * in each IP into a state suitable for suspend.
2677  * Returns 0 on success, negative error code on failure.
2678  */
2679 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2680 {
2681         int i, r;
2682
2683         if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
2684                 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2685                 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2686         }
2687
2688         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2689                 if (!adev->ip_blocks[i].status.valid)
2690                         continue;
2691
2692                 /* displays are handled separately */
2693                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2694                         continue;
2695
2697                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2698                 /* XXX handle errors */
2699                 if (r) {
2700                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2701                                   adev->ip_blocks[i].version->funcs->name, r);
2702                         return r;
2703                 }
2704
2705                 adev->ip_blocks[i].status.hw = false;
2706         }
2707
2708         return 0;
2709 }
2710
2711 /**
2712  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2713  *
2714  * @adev: amdgpu_device pointer
2715  *
2716  * Main suspend function for hardware IPs.  The list of all the hardware
2717  * IPs that make up the asic is walked, clockgating is disabled and the
2718  * suspend callbacks are run.  suspend puts the hardware and software state
2719  * in each IP into a state suitable for suspend.
2720  * Returns 0 on success, negative error code on failure.
2721  */
2722 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2723 {
2724         int i, r;
2725
2726         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2727                 if (!adev->ip_blocks[i].status.valid)
2728                         continue;
2729                 /* displays are handled in phase1 */
2730                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2731                         continue;
2732                 /* PSP lost connection when err_event_athub occurs */
2733                 if (amdgpu_ras_intr_triggered() &&
2734                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2735                         adev->ip_blocks[i].status.hw = false;
2736                         continue;
2737                 }
2738                 /* XXX handle errors */
2739                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2740                 /* XXX handle errors */
2741                 if (r) {
2742                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2743                                   adev->ip_blocks[i].version->funcs->name, r);
2744                 }
2745                 adev->ip_blocks[i].status.hw = false;
2746                 /* handle putting the SMC in the appropriate state */
2747                 if (!amdgpu_sriov_vf(adev)) {
2748                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2749                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2750                                 if (r) {
2751                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2752                                                         adev->mp1_state, r);
2753                                         return r;
2754                                 }
2755                         }
2756                 }
2757                 adev->ip_blocks[i].status.hw = false;
2758         }
2759
2760         return 0;
2761 }
2762
2763 /**
2764  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2765  *
2766  * @adev: amdgpu_device pointer
2767  *
2768  * Main suspend function for hardware IPs.  The list of all the hardware
2769  * IPs that make up the asic is walked, clockgating is disabled and the
2770  * suspend callbacks are run.  suspend puts the hardware and software state
2771  * in each IP into a state suitable for suspend.
2772  * Returns 0 on success, negative error code on failure.
2773  */
2774 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2775 {
2776         int r;
2777
2778         if (amdgpu_sriov_vf(adev))
2779                 amdgpu_virt_request_full_gpu(adev, false);
2780
2781         r = amdgpu_device_ip_suspend_phase1(adev);
2782         if (r)
2783                 return r;
2784         r = amdgpu_device_ip_suspend_phase2(adev);
2785
2786         if (amdgpu_sriov_vf(adev))
2787                 amdgpu_virt_release_full_gpu(adev, false);
2788
2789         return r;
2790 }
2791
2792 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2793 {
2794         int i, r;
2795
2796         static enum amd_ip_block_type ip_order[] = {
2797                 AMD_IP_BLOCK_TYPE_GMC,
2798                 AMD_IP_BLOCK_TYPE_COMMON,
2799                 AMD_IP_BLOCK_TYPE_PSP,
2800                 AMD_IP_BLOCK_TYPE_IH,
2801         };
2802
2803         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2804                 int j;
2805                 struct amdgpu_ip_block *block;
2806
2807                 block = &adev->ip_blocks[i];
2808                 block->status.hw = false;
2809
2810                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2811
2812                         if (block->version->type != ip_order[j] ||
2813                                 !block->status.valid)
2814                                 continue;
2815
2816                         r = block->version->funcs->hw_init(adev);
2817                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2818                         if (r)
2819                                 return r;
2820                         block->status.hw = true;
2821                 }
2822         }
2823
2824         return 0;
2825 }
2826
2827 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2828 {
2829         int i, r;
2830
2831         static enum amd_ip_block_type ip_order[] = {
2832                 AMD_IP_BLOCK_TYPE_SMC,
2833                 AMD_IP_BLOCK_TYPE_DCE,
2834                 AMD_IP_BLOCK_TYPE_GFX,
2835                 AMD_IP_BLOCK_TYPE_SDMA,
2836                 AMD_IP_BLOCK_TYPE_UVD,
2837                 AMD_IP_BLOCK_TYPE_VCE,
2838                 AMD_IP_BLOCK_TYPE_VCN
2839         };
2840
2841         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2842                 int j;
2843                 struct amdgpu_ip_block *block;
2844
2845                 for (j = 0; j < adev->num_ip_blocks; j++) {
2846                         block = &adev->ip_blocks[j];
2847
2848                         if (block->version->type != ip_order[i] ||
2849                                 !block->status.valid ||
2850                                 block->status.hw)
2851                                 continue;
2852
2853                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2854                                 r = block->version->funcs->resume(adev);
2855                         else
2856                                 r = block->version->funcs->hw_init(adev);
2857
2858                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2859                         if (r)
2860                                 return r;
2861                         block->status.hw = true;
2862                 }
2863         }
2864
2865         return 0;
2866 }
2867
2868 /**
2869  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2870  *
2871  * @adev: amdgpu_device pointer
2872  *
2873  * First resume function for hardware IPs.  The list of all the hardware
2874  * IPs that make up the asic is walked and the resume callbacks are run for
2875  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2876  * after a suspend and updates the software state as necessary.  This
2877  * function is also used for restoring the GPU after a GPU reset.
2878  * Returns 0 on success, negative error code on failure.
2879  */
2880 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2881 {
2882         int i, r;
2883
2884         for (i = 0; i < adev->num_ip_blocks; i++) {
2885                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2886                         continue;
2887                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2888                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2889                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2890
2891                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2892                         if (r) {
2893                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2894                                           adev->ip_blocks[i].version->funcs->name, r);
2895                                 return r;
2896                         }
2897                         adev->ip_blocks[i].status.hw = true;
2898                 }
2899         }
2900
2901         return 0;
2902 }
2903
2904 /**
2905  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2906  *
2907  * @adev: amdgpu_device pointer
2908  *
2909  * Second resume function for hardware IPs.  The list of all the hardware
2910  * IPs that make up the asic is walked and the resume callbacks are run for
2911  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2912  * functional state after a suspend and updates the software state as
2913  * necessary.  This function is also used for restoring the GPU after a GPU
2914  * reset.
2915  * Returns 0 on success, negative error code on failure.
2916  */
2917 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2918 {
2919         int i, r;
2920
2921         for (i = 0; i < adev->num_ip_blocks; i++) {
2922                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2923                         continue;
2924                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2925                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2926                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2927                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2928                         continue;
2929                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2930                 if (r) {
2931                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2932                                   adev->ip_blocks[i].version->funcs->name, r);
2933                         return r;
2934                 }
2935                 adev->ip_blocks[i].status.hw = true;
2936         }
2937
2938         return 0;
2939 }
2940
2941 /**
2942  * amdgpu_device_ip_resume - run resume for hardware IPs
2943  *
2944  * @adev: amdgpu_device pointer
2945  *
2946  * Main resume function for hardware IPs.  The hardware IPs
2947  * are split into two resume functions because they are
2948  * also used in recovering from a GPU reset and some additional
2949  * steps need to be taken between them.  In this case (S3/S4) they are
2950  * run sequentially.
2951  * Returns 0 on success, negative error code on failure.
2952  */
2953 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2954 {
2955         int r;
2956
2957         r = amdgpu_device_ip_resume_phase1(adev);
2958         if (r)
2959                 return r;
2960
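        /*
         * Firmware (loaded via PSP/SMU) has to go in after the COMMON/GMC/IH
         * blocks are back up but before the remaining IPs are resumed in
         * phase 2.
         */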
2961         r = amdgpu_device_fw_loading(adev);
2962         if (r)
2963                 return r;
2964
2965         r = amdgpu_device_ip_resume_phase2(adev);
2966
2967         return r;
2968 }
2969
2970 /**
2971  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2972  *
2973  * @adev: amdgpu_device pointer
2974  *
2975  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2976  */
2977 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2978 {
2979         if (amdgpu_sriov_vf(adev)) {
2980                 if (adev->is_atom_fw) {
2981                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2982                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2983                 } else {
2984                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2985                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2986                 }
2987
2988                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2989                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2990         }
2991 }
2992
2993 /**
2994  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2995  *
2996  * @asic_type: AMD asic type
2997  *
2998  * Check if there is DC (new modesetting infrastructure) support for an asic.
2999  * Returns true if DC has support, false if not.
3000  */
3001 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3002 {
3003         switch (asic_type) {
3004 #if defined(CONFIG_DRM_AMD_DC)
3005 #if defined(CONFIG_DRM_AMD_DC_SI)
3006         case CHIP_TAHITI:
3007         case CHIP_PITCAIRN:
3008         case CHIP_VERDE:
3009         case CHIP_OLAND:
3010 #endif
3011         case CHIP_BONAIRE:
3012         case CHIP_KAVERI:
3013         case CHIP_KABINI:
3014         case CHIP_MULLINS:
3015                 /*
3016                  * We have systems in the wild with these ASICs that require
3017                  * LVDS and VGA support which is not supported with DC.
3018                  *
3019                  * Fallback to the non-DC driver here by default so as not to
3020                  * cause regressions.
3021                  */
3022                 return amdgpu_dc > 0;
3023         case CHIP_HAWAII:
3024         case CHIP_CARRIZO:
3025         case CHIP_STONEY:
3026         case CHIP_POLARIS10:
3027         case CHIP_POLARIS11:
3028         case CHIP_POLARIS12:
3029         case CHIP_VEGAM:
3030         case CHIP_TONGA:
3031         case CHIP_FIJI:
3032         case CHIP_VEGA10:
3033         case CHIP_VEGA12:
3034         case CHIP_VEGA20:
3035 #if defined(CONFIG_DRM_AMD_DC_DCN)
3036         case CHIP_RAVEN:
3037         case CHIP_NAVI10:
3038         case CHIP_NAVI14:
3039         case CHIP_NAVI12:
3040         case CHIP_RENOIR:
3041         case CHIP_SIENNA_CICHLID:
3042         case CHIP_NAVY_FLOUNDER:
3043         case CHIP_DIMGREY_CAVEFISH:
3044         case CHIP_VANGOGH:
3045 #endif
3046                 return amdgpu_dc != 0;
3047 #endif
3048         default:
3049                 if (amdgpu_dc > 0)
3050                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3051                                          "but isn't supported by ASIC, ignoring\n");
3052                 return false;
3053         }
3054 }
3055
3056 /**
3057  * amdgpu_device_has_dc_support - check if dc is supported
3058  *
3059  * @adev: amdgpu_device pointer
3060  *
3061  * Returns true for supported, false for not supported
3062  */
3063 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3064 {
3065         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3066                 return false;
3067
3068         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3069 }
3070
3071
3072 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3073 {
3074         struct amdgpu_device *adev =
3075                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3076         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3077
3078         /* It's a bug to not have a hive within this function */
3079         if (WARN_ON(!hive))
3080                 return;
3081
3082         /*
3083          * Use task barrier to synchronize all xgmi reset works across the
3084          * hive. task_barrier_enter and task_barrier_exit will block
3085          * until all the threads running the xgmi reset works reach
3086          * those points. task_barrier_full will do both blocks.
3087          */
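        /*
         * For BACO every node in the hive has to enter BACO before any node
         * exits it, which is what the separate enter/exit barriers below
         * enforce.
         */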
3088         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3089
3090                 task_barrier_enter(&hive->tb);
3091                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3092
3093                 if (adev->asic_reset_res)
3094                         goto fail;
3095
3096                 task_barrier_exit(&hive->tb);
3097                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3098
3099                 if (adev->asic_reset_res)
3100                         goto fail;
3101
3102                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3103                         adev->mmhub.funcs->reset_ras_error_count(adev);
3104         } else {
3105
3106                 task_barrier_full(&hive->tb);
3107                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3108         }
3109
3110 fail:
3111         if (adev->asic_reset_res)
3112                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3113                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3114         amdgpu_put_xgmi_hive(hive);
3115 }
3116
3117 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3118 {
3119         char *input = amdgpu_lockup_timeout;
3120         char *timeout_setting = NULL;
3121         int index = 0;
3122         long timeout;
3123         int ret = 0;
3124
3125         /*
3126          * By default the timeout for non-compute jobs is 10000 ms and
3127          * no timeout is enforced on compute jobs.
3128          * In SR-IOV or passthrough mode, the timeout for compute
3129          * jobs is 60000 ms by default.
3130          */
3131         adev->gfx_timeout = msecs_to_jiffies(10000);
3132         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3133         if (amdgpu_sriov_vf(adev))
3134                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3135                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3136         else if (amdgpu_passthrough(adev))
3137                 adev->compute_timeout =  msecs_to_jiffies(60000);
3138         else
3139                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
3140
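        /*
         * amdgpu_lockup_timeout is a comma separated list; the values are
         * consumed below in the order gfx, compute, sdma, video
         * (e.g. "10000,60000,10000,10000").
         */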
3141         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3142                 while ((timeout_setting = strsep(&input, ",")) &&
3143                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3144                         ret = kstrtol(timeout_setting, 0, &timeout);
3145                         if (ret)
3146                                 return ret;
3147
3148                         if (timeout == 0) {
3149                                 index++;
3150                                 continue;
3151                         } else if (timeout < 0) {
3152                                 timeout = MAX_SCHEDULE_TIMEOUT;
3153                         } else {
3154                                 timeout = msecs_to_jiffies(timeout);
3155                         }
3156
3157                         switch (index++) {
3158                         case 0:
3159                                 adev->gfx_timeout = timeout;
3160                                 break;
3161                         case 1:
3162                                 adev->compute_timeout = timeout;
3163                                 break;
3164                         case 2:
3165                                 adev->sdma_timeout = timeout;
3166                                 break;
3167                         case 3:
3168                                 adev->video_timeout = timeout;
3169                                 break;
3170                         default:
3171                                 break;
3172                         }
3173                 }
3174                 /*
3175                  * There is only one value specified and
3176                  * it should apply to all non-compute jobs.
3177                  */
3178                 if (index == 1) {
3179                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3180                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3181                                 adev->compute_timeout = adev->gfx_timeout;
3182                 }
3183         }
3184
3185         return ret;
3186 }
3187
3188 static const struct attribute *amdgpu_dev_attributes[] = {
3189         &dev_attr_product_name.attr,
3190         &dev_attr_product_number.attr,
3191         &dev_attr_serial_number.attr,
3192         &dev_attr_pcie_replay_count.attr,
3193         NULL
3194 };
3195
3196
3197 /**
3198  * amdgpu_device_init - initialize the driver
3199  *
3200  * @adev: amdgpu_device pointer
3201  * @flags: driver flags
3202  *
3203  * Initializes the driver info and hw (all asics).
3204  * Returns 0 for success or an error on failure.
3205  * Called at driver startup.
3206  */
3207 int amdgpu_device_init(struct amdgpu_device *adev,
3208                        uint32_t flags)
3209 {
3210         struct drm_device *ddev = adev_to_drm(adev);
3211         struct pci_dev *pdev = adev->pdev;
3212         int r, i;
3213         bool atpx = false;
3214         u32 max_MBps;
3215
3216         adev->shutdown = false;
3217         adev->flags = flags;
3218
3219         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3220                 adev->asic_type = amdgpu_force_asic_type;
3221         else
3222                 adev->asic_type = flags & AMD_ASIC_MASK;
3223
3224         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3225         if (amdgpu_emu_mode == 1)
3226                 adev->usec_timeout *= 10;
3227         adev->gmc.gart_size = 512 * 1024 * 1024;
3228         adev->accel_working = false;
3229         adev->num_rings = 0;
3230         adev->mman.buffer_funcs = NULL;
3231         adev->mman.buffer_funcs_ring = NULL;
3232         adev->vm_manager.vm_pte_funcs = NULL;
3233         adev->vm_manager.vm_pte_num_scheds = 0;
3234         adev->gmc.gmc_funcs = NULL;
3235         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3236         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3237
3238         adev->smc_rreg = &amdgpu_invalid_rreg;
3239         adev->smc_wreg = &amdgpu_invalid_wreg;
3240         adev->pcie_rreg = &amdgpu_invalid_rreg;
3241         adev->pcie_wreg = &amdgpu_invalid_wreg;
3242         adev->pciep_rreg = &amdgpu_invalid_rreg;
3243         adev->pciep_wreg = &amdgpu_invalid_wreg;
3244         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3245         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3246         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3247         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3248         adev->didt_rreg = &amdgpu_invalid_rreg;
3249         adev->didt_wreg = &amdgpu_invalid_wreg;
3250         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3251         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3252         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3253         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3254
3255         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3256                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3257                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3258
3259         /* mutex initializations are all done here so we
3260          * can recall the function without locking issues */
3261         atomic_set(&adev->irq.ih.lock, 0);
3262         mutex_init(&adev->firmware.mutex);
3263         mutex_init(&adev->pm.mutex);
3264         mutex_init(&adev->gfx.gpu_clock_mutex);
3265         mutex_init(&adev->srbm_mutex);
3266         mutex_init(&adev->gfx.pipe_reserve_mutex);
3267         mutex_init(&adev->gfx.gfx_off_mutex);
3268         mutex_init(&adev->grbm_idx_mutex);
3269         mutex_init(&adev->mn_lock);
3270         mutex_init(&adev->virt.vf_errors.lock);
3271         hash_init(adev->mn_hash);
3272         atomic_set(&adev->in_gpu_reset, 0);
3273         init_rwsem(&adev->reset_sem);
3274         mutex_init(&adev->psp.mutex);
3275         mutex_init(&adev->notifier_lock);
3276
3277         r = amdgpu_device_check_arguments(adev);
3278         if (r)
3279                 return r;
3280
3281         spin_lock_init(&adev->mmio_idx_lock);
3282         spin_lock_init(&adev->smc_idx_lock);
3283         spin_lock_init(&adev->pcie_idx_lock);
3284         spin_lock_init(&adev->uvd_ctx_idx_lock);
3285         spin_lock_init(&adev->didt_idx_lock);
3286         spin_lock_init(&adev->gc_cac_idx_lock);
3287         spin_lock_init(&adev->se_cac_idx_lock);
3288         spin_lock_init(&adev->audio_endpt_idx_lock);
3289         spin_lock_init(&adev->mm_stats.lock);
3290
3291         INIT_LIST_HEAD(&adev->shadow_list);
3292         mutex_init(&adev->shadow_list_lock);
3293
3294         INIT_DELAYED_WORK(&adev->delayed_init_work,
3295                           amdgpu_device_delayed_init_work_handler);
3296         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3297                           amdgpu_device_delay_enable_gfx_off);
3298
3299         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3300
3301         adev->gfx.gfx_off_req_count = 1;
3302         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3303
3304         atomic_set(&adev->throttling_logging_enabled, 1);
3305         /*
3306          * If throttling continues, logging will be performed every minute
3307          * to avoid log flooding. "-1" is subtracted since the thermal
3308          * throttling interrupt comes every second. Thus, the total logging
3309          * interval is 59 seconds (ratelimited printk interval) + 1 second
3310          * (waiting for the throttling interrupt) = 60 seconds.
3311          */
3312         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3313         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3314
3315         /* Registers mapping */
3316         /* TODO: block userspace mapping of io register */
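        /* the MMIO register BAR is BAR 5 on BONAIRE (CIK) and newer, BAR 2 on the older SI parts */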
3317         if (adev->asic_type >= CHIP_BONAIRE) {
3318                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3319                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3320         } else {
3321                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3322                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3323         }
3324
3325         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3326         if (adev->rmmio == NULL) {
3327                 return -ENOMEM;
3328         }
3329         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3330         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3331
3332         /* io port mapping */
3333         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3334                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3335                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3336                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3337                         break;
3338                 }
3339         }
3340         if (adev->rio_mem == NULL)
3341                 DRM_INFO("PCI I/O BAR is not found.\n");
3342
3343         /* enable PCIE atomic ops */
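        /*
         * Only 32- and 64-bit AtomicOp completion is requested from the root
         * port; failure is not fatal, the result is simply cached in
         * have_atomics_support for later users.
         */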
3344         r = pci_enable_atomic_ops_to_root(adev->pdev,
3345                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3346                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3347         if (r) {
3348                 adev->have_atomics_support = false;
3349                 DRM_INFO("PCIE atomic ops is not supported\n");
3350         } else {
3351                 adev->have_atomics_support = true;
3352         }
3353
3354         amdgpu_device_get_pcie_info(adev);
3355
3356         if (amdgpu_mcbp)
3357                 DRM_INFO("MCBP is enabled\n");
3358
3359         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3360                 adev->enable_mes = true;
3361
3362         /* detect hw virtualization here */
3363         amdgpu_detect_virtualization(adev);
3364
3365         r = amdgpu_device_get_job_timeout_settings(adev);
3366         if (r) {
3367                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3368                 goto failed_unmap;
3369         }
3370
3371         /* early init functions */
3372         r = amdgpu_device_ip_early_init(adev);
3373         if (r)
3374                 goto failed_unmap;
3375
3376         /* doorbell bar mapping and doorbell index init*/
3377         amdgpu_device_doorbell_init(adev);
3378
3379         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3380         /* this will fail for cards that aren't VGA class devices, just
3381          * ignore it */
3382         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3383                 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3384
3385         if (amdgpu_device_supports_atpx(ddev))
3386                 atpx = true;
3387         if (amdgpu_has_atpx() &&
3388             (amdgpu_is_atpx_hybrid() ||
3389              amdgpu_has_atpx_dgpu_power_cntl()) &&
3390             !pci_is_thunderbolt_attached(adev->pdev))
3391                 vga_switcheroo_register_client(adev->pdev,
3392                                                &amdgpu_switcheroo_ops, atpx);
3393         if (atpx)
3394                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3395
3396         if (amdgpu_emu_mode == 1) {
3397                 /* post the asic on emulation mode */
3398                 emu_soc_asic_init(adev);
3399                 goto fence_driver_init;
3400         }
3401
3402         /* detect whether we have an SR-IOV vBIOS */
3403         amdgpu_device_detect_sriov_bios(adev);
3404
3405         /* check if we need to reset the asic
3406          *  E.g., driver was not cleanly unloaded previously, etc.
3407          */
3408         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3409                 r = amdgpu_asic_reset(adev);
3410                 if (r) {
3411                         dev_err(adev->dev, "asic reset on init failed\n");
3412                         goto failed;
3413                 }
3414         }
3415
3416         pci_enable_pcie_error_reporting(adev->pdev);
3417
3418         /* Post card if necessary */
3419         if (amdgpu_device_need_post(adev)) {
3420                 if (!adev->bios) {
3421                         dev_err(adev->dev, "no vBIOS found\n");
3422                         r = -EINVAL;
3423                         goto failed;
3424                 }
3425                 DRM_INFO("GPU posting now...\n");
3426                 r = amdgpu_device_asic_init(adev);
3427                 if (r) {
3428                         dev_err(adev->dev, "gpu post error!\n");
3429                         goto failed;
3430                 }
3431         }
3432
3433         if (adev->is_atom_fw) {
3434                 /* Initialize clocks */
3435                 r = amdgpu_atomfirmware_get_clock_info(adev);
3436                 if (r) {
3437                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3438                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3439                         goto failed;
3440                 }
3441         } else {
3442                 /* Initialize clocks */
3443                 r = amdgpu_atombios_get_clock_info(adev);
3444                 if (r) {
3445                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3446                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3447                         goto failed;
3448                 }
3449                 /* init i2c buses */
3450                 if (!amdgpu_device_has_dc_support(adev))
3451                         amdgpu_atombios_i2c_init(adev);
3452         }
3453
3454 fence_driver_init:
3455         /* Fence driver */
3456         r = amdgpu_fence_driver_init(adev);
3457         if (r) {
3458                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3459                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3460                 goto failed;
3461         }
3462
3463         /* init the mode config */
3464         drm_mode_config_init(adev_to_drm(adev));
3465
3466         r = amdgpu_device_ip_init(adev);
3467         if (r) {
3468                 /* failed in exclusive mode due to timeout */
3469                 if (amdgpu_sriov_vf(adev) &&
3470                     !amdgpu_sriov_runtime(adev) &&
3471                     amdgpu_virt_mmio_blocked(adev) &&
3472                     !amdgpu_virt_wait_reset(adev)) {
3473                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3474                         /* Don't send request since VF is inactive. */
3475                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3476                         adev->virt.ops = NULL;
3477                         r = -EAGAIN;
3478                         goto failed;
3479                 }
3480                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3481                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3482                 goto failed;
3483         }
3484
3485         dev_info(adev->dev,
3486                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3487                         adev->gfx.config.max_shader_engines,
3488                         adev->gfx.config.max_sh_per_se,
3489                         adev->gfx.config.max_cu_per_sh,
3490                         adev->gfx.cu_info.number);
3491
3492         adev->accel_working = true;
3493
3494         amdgpu_vm_check_compute_bug(adev);
3495
3496         /* Initialize the buffer migration limit. */
3497         if (amdgpu_moverate >= 0)
3498                 max_MBps = amdgpu_moverate;
3499         else
3500                 max_MBps = 8; /* Allow 8 MB/s. */
3501         /* Get a log2 for easy divisions. */
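        /* e.g. the default 8 MB/s gives log2_max_MBps = 3, letting the stats code shift instead of divide */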
3502         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3503
3504         amdgpu_fbdev_init(adev);
3505
3506         r = amdgpu_pm_sysfs_init(adev);
3507         if (r) {
3508                 adev->pm_sysfs_en = false;
3509                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3510         } else
3511                 adev->pm_sysfs_en = true;
3512
3513         r = amdgpu_ucode_sysfs_init(adev);
3514         if (r) {
3515                 adev->ucode_sysfs_en = false;
3516                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3517         } else
3518                 adev->ucode_sysfs_en = true;
3519
3520         if ((amdgpu_testing & 1)) {
3521                 if (adev->accel_working)
3522                         amdgpu_test_moves(adev);
3523                 else
3524                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3525         }
3526         if (amdgpu_benchmarking) {
3527                 if (adev->accel_working)
3528                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3529                 else
3530                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3531         }
3532
3533         /*
3534          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3535          * Otherwise the mgpu fan boost feature will be skipped because the
3536          * gpu instance count would be too low.
3537          */
3538         amdgpu_register_gpu_instance(adev);
3539
3540         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3541          * explicit gating rather than handling it automatically.
3542          */
3543         r = amdgpu_device_ip_late_init(adev);
3544         if (r) {
3545                 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3546                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3547                 goto failed;
3548         }
3549
3550         /* must succeed. */
3551         amdgpu_ras_resume(adev);
3552
3553         queue_delayed_work(system_wq, &adev->delayed_init_work,
3554                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3555
3556         if (amdgpu_sriov_vf(adev))
3557                 flush_delayed_work(&adev->delayed_init_work);
3558
3559         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3560         if (r)
3561                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3562
3563         r = IS_ENABLED(CONFIG_PERF_EVENTS) ?
3564                 amdgpu_pmu_init(adev) : 0;
3565         if (r)
3566                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3567
3568         /* Keep the cached PCI config space at hand so it can be restored after a sudden PCI error */
3569         if (amdgpu_device_cache_pci_state(adev->pdev))
3570                 pci_restore_state(pdev);
3571
3572         return 0;
3573
3574 failed:
3575         amdgpu_vf_error_trans_all(adev);
3576         if (atpx)
3577                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3578
3579 failed_unmap:
3580         iounmap(adev->rmmio);
3581         adev->rmmio = NULL;
3582
3583         return r;
3584 }
3585
3586 /**
3587  * amdgpu_device_fini - tear down the driver
3588  *
3589  * @adev: amdgpu_device pointer
3590  *
3591  * Tear down the driver info (all asics).
3592  * Called at driver shutdown.
3593  */
3594 void amdgpu_device_fini(struct amdgpu_device *adev)
3595 {
3596         dev_info(adev->dev, "amdgpu: finishing device.\n");
3597         flush_delayed_work(&adev->delayed_init_work);
3598         adev->shutdown = true;
3599
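        /* free the PCI config space cached by amdgpu_device_cache_pci_state() */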
3600         kfree(adev->pci_state);
3601
3602         /* make sure IB tests are finished before entering exclusive mode
3603          * to avoid preempting the IB tests
3604          */
3605         if (amdgpu_sriov_vf(adev)) {
3606                 amdgpu_virt_request_full_gpu(adev, false);
3607                 amdgpu_virt_fini_data_exchange(adev);
3608         }
3609
3610         /* disable all interrupts */
3611         amdgpu_irq_disable_all(adev);
3612         if (adev->mode_info.mode_config_initialized){
3613                 if (!amdgpu_device_has_dc_support(adev))
3614                         drm_helper_force_disable_all(adev_to_drm(adev));
3615                 else
3616                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3617         }
3618         amdgpu_fence_driver_fini(adev);
3619         if (adev->pm_sysfs_en)
3620                 amdgpu_pm_sysfs_fini(adev);
3621         amdgpu_fbdev_fini(adev);
3622         amdgpu_device_ip_fini(adev);
3623         release_firmware(adev->firmware.gpu_info_fw);
3624         adev->firmware.gpu_info_fw = NULL;
3625         adev->accel_working = false;
3626         /* free i2c buses */
3627         if (!amdgpu_device_has_dc_support(adev))
3628                 amdgpu_i2c_fini(adev);
3629
3630         if (amdgpu_emu_mode != 1)
3631                 amdgpu_atombios_fini(adev);
3632
3633         kfree(adev->bios);
3634         adev->bios = NULL;
3635         if (amdgpu_has_atpx() &&
3636             (amdgpu_is_atpx_hybrid() ||
3637              amdgpu_has_atpx_dgpu_power_cntl()) &&
3638             !pci_is_thunderbolt_attached(adev->pdev))
3639                 vga_switcheroo_unregister_client(adev->pdev);
3640         if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
3641                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3642         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3643                 vga_client_register(adev->pdev, NULL, NULL, NULL);
3644         if (adev->rio_mem)
3645                 pci_iounmap(adev->pdev, adev->rio_mem);
3646         adev->rio_mem = NULL;
3647         iounmap(adev->rmmio);
3648         adev->rmmio = NULL;
3649         amdgpu_device_doorbell_fini(adev);
3650
3651         if (adev->ucode_sysfs_en)
3652                 amdgpu_ucode_sysfs_fini(adev);
3653
3654         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3655         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3656                 amdgpu_pmu_fini(adev);
3657         if (adev->mman.discovery_bin)
3658                 amdgpu_discovery_fini(adev);
3659 }
3660
3661
3662 /*
3663  * Suspend & resume.
3664  */
3665 /**
3666  * amdgpu_device_suspend - initiate device suspend
3667  *
3668  * @dev: drm dev pointer
3669  * @fbcon: notify the fbdev of suspend
3670  *
3671  * Puts the hw in the suspend state (all asics).
3672  * Returns 0 for success or an error on failure.
3673  * Called at driver suspend.
3674  */
3675 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3676 {
3677         struct amdgpu_device *adev;
3678         struct drm_crtc *crtc;
3679         struct drm_connector *connector;
3680         struct drm_connector_list_iter iter;
3681         int r;
3682
3683         adev = drm_to_adev(dev);
3684
3685         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3686                 return 0;
3687
3688         adev->in_suspend = true;
3689         drm_kms_helper_poll_disable(dev);
3690
3691         if (fbcon)
3692                 amdgpu_fbdev_set_suspend(adev, 1);
3693
3694         cancel_delayed_work_sync(&adev->delayed_init_work);
3695
3696         if (!amdgpu_device_has_dc_support(adev)) {
3697                 /* turn off display hw */
3698                 drm_modeset_lock_all(dev);
3699                 drm_connector_list_iter_begin(dev, &iter);
3700                 drm_for_each_connector_iter(connector, &iter)
3701                         drm_helper_connector_dpms(connector,
3702                                                   DRM_MODE_DPMS_OFF);
3703                 drm_connector_list_iter_end(&iter);
3704                 drm_modeset_unlock_all(dev);
3705                         /* unpin the front buffers and cursors */
3706                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3707                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3708                         struct drm_framebuffer *fb = crtc->primary->fb;
3709                         struct amdgpu_bo *robj;
3710
3711                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3712                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3713                                 r = amdgpu_bo_reserve(aobj, true);
3714                                 if (r == 0) {
3715                                         amdgpu_bo_unpin(aobj);
3716                                         amdgpu_bo_unreserve(aobj);
3717                                 }
3718                         }
3719
3720                         if (fb == NULL || fb->obj[0] == NULL) {
3721                                 continue;
3722                         }
3723                         robj = gem_to_amdgpu_bo(fb->obj[0]);
3724                         /* don't unpin kernel fb objects */
3725                         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3726                                 r = amdgpu_bo_reserve(robj, true);
3727                                 if (r == 0) {
3728                                         amdgpu_bo_unpin(robj);
3729                                         amdgpu_bo_unreserve(robj);
3730                                 }
3731                         }
3732                 }
3733         }
3734
3735         amdgpu_ras_suspend(adev);
3736
3737         r = amdgpu_device_ip_suspend_phase1(adev);
3738
3739         amdgpu_amdkfd_suspend(adev, !fbcon);
3740
3741         /* evict vram memory */
3742         amdgpu_bo_evict_vram(adev);
3743
3744         amdgpu_fence_driver_suspend(adev);
3745
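        /*
         * With S0ix the IP blocks are left as-is and only the SMU is notified
         * of the D3 transition; the full phase 2 IP suspend is needed for
         * S3/S4 and while going through a GPU reset.
         */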
3746         if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
3747                 r = amdgpu_device_ip_suspend_phase2(adev);
3748         else
3749                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
3750         /* evict remaining vram memory
3751          * This second call to evict vram is to evict the gart page table
3752          * using the CPU.
3753          */
3754         amdgpu_bo_evict_vram(adev);
3755
3756         return 0;
3757 }
3758
3759 /**
3760  * amdgpu_device_resume - initiate device resume
3761  *
3762  * @dev: drm dev pointer
3763  * @fbcon: notify the fbdev of resume
3764  *
3765  * Bring the hw back to operating state (all asics).
3766  * Returns 0 for success or an error on failure.
3767  * Called at driver resume.
3768  */
3769 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3770 {
3771         struct drm_connector *connector;
3772         struct drm_connector_list_iter iter;
3773         struct amdgpu_device *adev = drm_to_adev(dev);
3774         struct drm_crtc *crtc;
3775         int r = 0;
3776
3777         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3778                 return 0;
3779
3780         if (amdgpu_acpi_is_s0ix_supported(adev))
3781                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3782
3783         /* post card */
3784         if (amdgpu_device_need_post(adev)) {
3785                 r = amdgpu_device_asic_init(adev);
3786                 if (r)
3787                         dev_err(adev->dev, "amdgpu asic init failed\n");
3788         }
3789
3790         r = amdgpu_device_ip_resume(adev);
3791         if (r) {
3792                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3793                 return r;
3794         }
3795         amdgpu_fence_driver_resume(adev);
3796
3797
3798         r = amdgpu_device_ip_late_init(adev);
3799         if (r)
3800                 return r;
3801
3802         queue_delayed_work(system_wq, &adev->delayed_init_work,
3803                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3804
3805         if (!amdgpu_device_has_dc_support(adev)) {
3806                 /* pin cursors */
3807                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3808                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3809
3810                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3811                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3812                                 r = amdgpu_bo_reserve(aobj, true);
3813                                 if (r == 0) {
3814                                         r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3815                                         if (r != 0)
3816                                                 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3817                                         amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3818                                         amdgpu_bo_unreserve(aobj);
3819                                 }
3820                         }
3821                 }
3822         }
3823         r = amdgpu_amdkfd_resume(adev, !fbcon);
3824         if (r)
3825                 return r;
3826
3827         /* Make sure IB tests flushed */
3828         flush_delayed_work(&adev->delayed_init_work);
3829
3830         /* blat the mode back in */
3831         if (fbcon) {
3832                 if (!amdgpu_device_has_dc_support(adev)) {
3833                         /* pre DCE11 */
3834                         drm_helper_resume_force_mode(dev);
3835
3836                         /* turn on display hw */
3837                         drm_modeset_lock_all(dev);
3838
3839                         drm_connector_list_iter_begin(dev, &iter);
3840                         drm_for_each_connector_iter(connector, &iter)
3841                                 drm_helper_connector_dpms(connector,
3842                                                           DRM_MODE_DPMS_ON);
3843                         drm_connector_list_iter_end(&iter);
3844
3845                         drm_modeset_unlock_all(dev);
3846                 }
3847                 amdgpu_fbdev_set_suspend(adev, 0);
3848         }
3849
3850         drm_kms_helper_poll_enable(dev);
3851
3852         amdgpu_ras_resume(adev);
3853
3854         /*
3855          * Most of the connector probing functions try to acquire runtime pm
3856          * refs to ensure that the GPU is powered on when connector polling is
3857          * performed. Since we're calling this from a runtime PM callback,
3858          * trying to acquire rpm refs will cause us to deadlock.
3859          *
3860          * Since we're guaranteed to be holding the rpm lock, it's safe to
3861          * temporarily disable the rpm helpers so this doesn't deadlock us.
3862          */
3863 #ifdef CONFIG_PM
3864         dev->dev->power.disable_depth++;
3865 #endif
3866         if (!amdgpu_device_has_dc_support(adev))
3867                 drm_helper_hpd_irq_event(dev);
3868         else
3869                 drm_kms_helper_hotplug_event(dev);
3870 #ifdef CONFIG_PM
3871         dev->dev->power.disable_depth--;
3872 #endif
3873         adev->in_suspend = false;
3874
3875         return 0;
3876 }
3877
3878 /**
3879  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3880  *
3881  * @adev: amdgpu_device pointer
3882  *
3883  * The list of all the hardware IPs that make up the asic is walked and
3884  * the check_soft_reset callbacks are run.  check_soft_reset determines
3885  * if the asic is still hung or not.
3886  * Returns true if any of the IPs are still in a hung state, false if not.
3887  */
3888 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3889 {
3890         int i;
3891         bool asic_hang = false;
3892
3893         if (amdgpu_sriov_vf(adev))
3894                 return true;
3895
3896         if (amdgpu_asic_need_full_reset(adev))
3897                 return true;
3898
3899         for (i = 0; i < adev->num_ip_blocks; i++) {
3900                 if (!adev->ip_blocks[i].status.valid)
3901                         continue;
3902                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3903                         adev->ip_blocks[i].status.hang =
3904                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3905                 if (adev->ip_blocks[i].status.hang) {
3906                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3907                         asic_hang = true;
3908                 }
3909         }
3910         return asic_hang;
3911 }
3912
3913 /**
3914  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3915  *
3916  * @adev: amdgpu_device pointer
3917  *
3918  * The list of all the hardware IPs that make up the asic is walked and the
3919  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3920  * handles any IP specific hardware or software state changes that are
3921  * necessary for a soft reset to succeed.
3922  * Returns 0 on success, negative error code on failure.
3923  */
3924 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3925 {
3926         int i, r = 0;
3927
3928         for (i = 0; i < adev->num_ip_blocks; i++) {
3929                 if (!adev->ip_blocks[i].status.valid)
3930                         continue;
3931                 if (adev->ip_blocks[i].status.hang &&
3932                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3933                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3934                         if (r)
3935                                 return r;
3936                 }
3937         }
3938
3939         return 0;
3940 }
3941
3942 /**
3943  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3944  *
3945  * @adev: amdgpu_device pointer
3946  *
3947  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3948  * reset is necessary to recover.
3949  * Returns true if a full asic reset is required, false if not.
3950  */
3951 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3952 {
3953         int i;
3954
3955         if (amdgpu_asic_need_full_reset(adev))
3956                 return true;
3957
3958         for (i = 0; i < adev->num_ip_blocks; i++) {
3959                 if (!adev->ip_blocks[i].status.valid)
3960                         continue;
3961                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3962                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3963                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3964                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3965                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3966                         if (adev->ip_blocks[i].status.hang) {
3967                                 dev_info(adev->dev, "Some block need full reset!\n");
3968                                 return true;
3969                         }
3970                 }
3971         }
3972         return false;
3973 }
3974
3975 /**
3976  * amdgpu_device_ip_soft_reset - do a soft reset
3977  *
3978  * @adev: amdgpu_device pointer
3979  *
3980  * The list of all the hardware IPs that make up the asic is walked and the
3981  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3982  * IP specific hardware or software state changes that are necessary to soft
3983  * reset the IP.
3984  * Returns 0 on success, negative error code on failure.
3985  */
3986 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3987 {
3988         int i, r = 0;
3989
3990         for (i = 0; i < adev->num_ip_blocks; i++) {
3991                 if (!adev->ip_blocks[i].status.valid)
3992                         continue;
3993                 if (adev->ip_blocks[i].status.hang &&
3994                     adev->ip_blocks[i].version->funcs->soft_reset) {
3995                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3996                         if (r)
3997                                 return r;
3998                 }
3999         }
4000
4001         return 0;
4002 }
4003
4004 /**
4005  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4006  *
4007  * @adev: amdgpu_device pointer
4008  *
4009  * The list of all the hardware IPs that make up the asic is walked and the
4010  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4011  * handles any IP specific hardware or software state changes that are
4012  * necessary after the IP has been soft reset.
4013  * Returns 0 on success, negative error code on failure.
4014  */
4015 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4016 {
4017         int i, r = 0;
4018
4019         for (i = 0; i < adev->num_ip_blocks; i++) {
4020                 if (!adev->ip_blocks[i].status.valid)
4021                         continue;
4022                 if (adev->ip_blocks[i].status.hang &&
4023                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4024                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4025                 if (r)
4026                         return r;
4027         }
4028
4029         return 0;
4030 }
4031
4032 /**
4033  * amdgpu_device_recover_vram - Recover some VRAM contents
4034  *
4035  * @adev: amdgpu_device pointer
4036  *
4037  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4038  * restore things like GPUVM page tables after a GPU reset where
4039  * the contents of VRAM might be lost.
4040  *
4041  * Returns:
4042  * 0 on success, negative error code on failure.
4043  */
4044 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4045 {
4046         struct dma_fence *fence = NULL, *next = NULL;
4047         struct amdgpu_bo *shadow;
4048         long r = 1, tmo;
4049
4050         if (amdgpu_sriov_runtime(adev))
4051                 tmo = msecs_to_jiffies(8000);
4052         else
4053                 tmo = msecs_to_jiffies(100);
4054
4055         dev_info(adev->dev, "recover vram bo from shadow start\n");
4056         mutex_lock(&adev->shadow_list_lock);
4057         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4058
4059                 /* No need to recover an evicted BO */
4060                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4061                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4062                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4063                         continue;
4064
4065                 r = amdgpu_bo_restore_shadow(shadow, &next);
4066                 if (r)
4067                         break;
4068
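                /*
                 * Wait for the fence of the previously issued restore while
                 * the copy just queued runs, so the copies stay pipelined.
                 */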
4069                 if (fence) {
4070                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4071                         dma_fence_put(fence);
4072                         fence = next;
4073                         if (tmo == 0) {
4074                                 r = -ETIMEDOUT;
4075                                 break;
4076                         } else if (tmo < 0) {
4077                                 r = tmo;
4078                                 break;
4079                         }
4080                 } else {
4081                         fence = next;
4082                 }
4083         }
4084         mutex_unlock(&adev->shadow_list_lock);
4085
4086         if (fence)
4087                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4088         dma_fence_put(fence);
4089
4090         if (r < 0 || tmo <= 0) {
4091                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4092                 return -EIO;
4093         }
4094
4095         dev_info(adev->dev, "recover vram bo from shadow done\n");
4096         return 0;
4097 }
4098
4099
4100 /**
4101  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4102  *
4103  * @adev: amdgpu_device pointer
4104  * @from_hypervisor: request from hypervisor
4105  *
4106  * Do a VF FLR (function level reset) and reinitialize the ASIC.
4107  * Returns 0 on success, negative error code on failure.
4108  */
4109 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4110                                      bool from_hypervisor)
4111 {
4112         int r;
4113
4114         if (from_hypervisor)
4115                 r = amdgpu_virt_request_full_gpu(adev, true);
4116         else
4117                 r = amdgpu_virt_reset_gpu(adev);
4118         if (r)
4119                 return r;
4120
4121         amdgpu_amdkfd_pre_reset(adev);
4122
4123         /* Resume IP prior to SMC */
4124         r = amdgpu_device_ip_reinit_early_sriov(adev);
4125         if (r)
4126                 goto error;
4127
4128         amdgpu_virt_init_data_exchange(adev);
4129         /* we need to recover the GART table prior to resuming SMC/CP/SDMA */
4130         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4131
4132         r = amdgpu_device_fw_loading(adev);
4133         if (r)
4134                 return r;
4135
4136         /* now we are okay to resume SMC/CP/SDMA */
4137         r = amdgpu_device_ip_reinit_late_sriov(adev);
4138         if (r)
4139                 goto error;
4140
4141         amdgpu_irq_gpu_reset_resume_helper(adev);
4142         r = amdgpu_ib_ring_tests(adev);
4143         amdgpu_amdkfd_post_reset(adev);
4144
4145 error:
4146         amdgpu_virt_release_full_gpu(adev, true);
4147         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4148                 amdgpu_inc_vram_lost(adev);
4149                 r = amdgpu_device_recover_vram(adev);
4150         }
4151
4152         return r;
4153 }
4154
4155 /**
4156  * amdgpu_device_has_job_running - check if there is any job in the pending list
4157  *
4158  * @adev: amdgpu_device pointer
4159  *
4160  * Check if there is any job in the scheduler pending list.
4161  */
4162 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4163 {
4164         int i;
4165         struct drm_sched_job *job;
4166
4167         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4168                 struct amdgpu_ring *ring = adev->rings[i];
4169
4170                 if (!ring || !ring->sched.thread)
4171                         continue;
4172
4173                 spin_lock(&ring->sched.job_list_lock);
4174                 job = list_first_entry_or_null(&ring->sched.pending_list,
4175                                                struct drm_sched_job, list);
4176                 spin_unlock(&ring->sched.job_list_lock);
4177                 if (job)
4178                         return true;
4179         }
4180         return false;
4181 }
4182
4183 /**
4184  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4185  *
4186  * @adev: amdgpu_device pointer
4187  *
4188  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4189  * a hung GPU.
4190  */
4191 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4192 {
4193         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4194                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4195                 return false;
4196         }
4197
4198         if (amdgpu_gpu_recovery == 0)
4199                 goto disabled;
4200
4201         if (amdgpu_sriov_vf(adev))
4202                 return true;
4203
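        /*
         * -1 means "auto": attempt recovery only on the ASICs listed below
         * where it is known to work, anything else falls through to disabled.
         */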
4204         if (amdgpu_gpu_recovery == -1) {
4205                 switch (adev->asic_type) {
4206                 case CHIP_BONAIRE:
4207                 case CHIP_HAWAII:
4208                 case CHIP_TOPAZ:
4209                 case CHIP_TONGA:
4210                 case CHIP_FIJI:
4211                 case CHIP_POLARIS10:
4212                 case CHIP_POLARIS11:
4213                 case CHIP_POLARIS12:
4214                 case CHIP_VEGAM:
4215                 case CHIP_VEGA20:
4216                 case CHIP_VEGA10:
4217                 case CHIP_VEGA12:
4218                 case CHIP_RAVEN:
4219                 case CHIP_ARCTURUS:
4220                 case CHIP_RENOIR:
4221                 case CHIP_NAVI10:
4222                 case CHIP_NAVI14:
4223                 case CHIP_NAVI12:
4224                 case CHIP_SIENNA_CICHLID:
4225                 case CHIP_NAVY_FLOUNDER:
4226                         break;
4227                 default:
4228                         goto disabled;
4229                 }
4230         }
4231
4232         return true;
4233
4234 disabled:
4235         dev_info(adev->dev, "GPU recovery disabled.\n");
4236         return false;
4237 }
4238
4239
4240 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4241                                         struct amdgpu_job *job,
4242                                         bool *need_full_reset_arg)
4243 {
4244         int i, r = 0;
4245         bool need_full_reset  = *need_full_reset_arg;
4246
4247         amdgpu_debugfs_wait_dump(adev);
4248
4249         if (amdgpu_sriov_vf(adev)) {
4250                 /* stop the data exchange thread */
4251                 amdgpu_virt_fini_data_exchange(adev);
4252         }
4253
4254         /* block all schedulers and reset given job's ring */
4255         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4256                 struct amdgpu_ring *ring = adev->rings[i];
4257
4258                 if (!ring || !ring->sched.thread)
4259                         continue;
4260
4261                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4262                 amdgpu_fence_driver_force_completion(ring);
4263         }
4264
4265         if(job)
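        /*
         * Bump the offending job's karma so the scheduler can mark its
         * context guilty once the karma crosses the hang limit.
         */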
4266                 drm_sched_increase_karma(&job->base);
4267
4268         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4269         if (!amdgpu_sriov_vf(adev)) {
4270
4271                 if (!need_full_reset)
4272                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4273
4274                 if (!need_full_reset) {
4275                         amdgpu_device_ip_pre_soft_reset(adev);
4276                         r = amdgpu_device_ip_soft_reset(adev);
4277                         amdgpu_device_ip_post_soft_reset(adev);
4278                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4279                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4280                                 need_full_reset = true;
4281                         }
4282                 }
4283
4284                 if (need_full_reset)
4285                         r = amdgpu_device_ip_suspend(adev);
4286
4287                 *need_full_reset_arg = need_full_reset;
4288         }
4289
4290         return r;
4291 }
4292
4293 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4294                                struct list_head *device_list_handle,
4295                                bool *need_full_reset_arg,
4296                                bool skip_hw_reset)
4297 {
4298         struct amdgpu_device *tmp_adev = NULL;
4299         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4300         int r = 0;
4301
4302         /*
4303          * ASIC reset has to be done on all XGMI hive nodes ASAP
4304          * to allow proper links negotiation in FW (within 1 sec)
4305          */
4306         if (!skip_hw_reset && need_full_reset) {
4307                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4308                         /* For XGMI run all resets in parallel to speed up the process */
4309                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4310                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
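                                /*
                                 * queue_work() only returns false when the
                                 * reset work is already pending, report that
                                 * as -EALREADY.
                                 */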
4311                                         r = -EALREADY;
4312                         } else
4313                                 r = amdgpu_asic_reset(tmp_adev);
4314
4315                         if (r) {
4316                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4317                                          r, adev_to_drm(tmp_adev)->unique);
4318                                 break;
4319                         }
4320                 }
4321
4322                 /* For XGMI wait for all resets to complete before proceed */
4323                 if (!r) {
4324                         list_for_each_entry(tmp_adev, device_list_handle,
4325                                             gmc.xgmi.head) {
4326                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4327                                         flush_work(&tmp_adev->xgmi_reset_work);
4328                                         r = tmp_adev->asic_reset_res;
4329                                         if (r)
4330                                                 break;
4331                                 }
4332                         }
4333                 }
4334         }
4335
4336         if (!r && amdgpu_ras_intr_triggered()) {
4337                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4338                         if (tmp_adev->mmhub.funcs &&
4339                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4340                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4341                 }
4342
4343                 amdgpu_ras_intr_cleared();
4344         }
4345
4346         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4347                 if (need_full_reset) {
4348                         /* post card */
4349                         if (amdgpu_device_asic_init(tmp_adev))
4350                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4351
4352                         if (!r) {
4353                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4354                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4355                                 if (r)
4356                                         goto out;
4357
4358                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4359                                 if (vram_lost) {
4360                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4361                                         amdgpu_inc_vram_lost(tmp_adev);
4362                                 }
4363
4364                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4365                                 if (r)
4366                                         goto out;
4367
4368                                 r = amdgpu_device_fw_loading(tmp_adev);
4369                                 if (r)
4370                                         return r;
4371
4372                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4373                                 if (r)
4374                                         goto out;
4375
4376                                 if (vram_lost)
4377                                         amdgpu_device_fill_reset_magic(tmp_adev);
4378
4379                                 /*
4380                                  * Add this ASIC back as tracked since the reset
4381                                  * already completed successfully.
4382                                  */
4383                                 amdgpu_register_gpu_instance(tmp_adev);
4384
4385                                 r = amdgpu_device_ip_late_init(tmp_adev);
4386                                 if (r)
4387                                         goto out;
4388
4389                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4390
4391                                 /*
4392                                  * The GPU enters a bad state once the number of
4393                                  * faulty pages flagged by ECC reaches the
4394                                  * threshold, and RAS recovery is scheduled next.
4395                                  * So check here and break recovery if the bad
4396                                  * page threshold has indeed been exceeded, and
4397                                  * remind the user to either retire this GPU or
4398                                  * set a bigger bad_page_threshold value the next
4399                                  * time the driver is probed.
4400                                  */
4401                                 if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
4402                                         /* must succeed. */
4403                                         amdgpu_ras_resume(tmp_adev);
4404                                 } else {
4405                                         r = -EINVAL;
4406                                         goto out;
4407                                 }
4408
4409                                 /* Update PSP FW topology after reset */
4410                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4411                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4412                         }
4413                 }
4414
4415 out:
4416                 if (!r) {
4417                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4418                         r = amdgpu_ib_ring_tests(tmp_adev);
4419                         if (r) {
4420                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4421                                 r = amdgpu_device_ip_suspend(tmp_adev);
4422                                 need_full_reset = true;
4423                                 r = -EAGAIN;
4424                                 goto end;
4425                         }
4426                 }
4427
4428                 if (!r)
4429                         r = amdgpu_device_recover_vram(tmp_adev);
4430                 else
4431                         tmp_adev->asic_reset_res = r;
4432         }
4433
4434 end:
4435         *need_full_reset_arg = need_full_reset;
4436         return r;
4437 }
4438
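/**
 * amdgpu_device_lock_adev - take the per-device reset lock
 *
 * @adev: amdgpu_device pointer
 * @hive: XGMI hive the device belongs to, or NULL
 *
 * Atomically marks the device as being in GPU reset, takes reset_sem (nested
 * against the hive lock when part of a hive), bumps the reset counter and
 * sets the MP1 state matching the chosen reset method. Returns false if
 * another reset is already in progress on this device.
 */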
4439 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4440                                 struct amdgpu_hive_info *hive)
4441 {
4442         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4443                 return false;
4444
4445         if (hive) {
4446                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4447         } else {
4448                 down_write(&adev->reset_sem);
4449         }
4450
4451         atomic_inc(&adev->gpu_reset_counter);
4452         switch (amdgpu_asic_reset_method(adev)) {
4453         case AMD_RESET_METHOD_MODE1:
4454                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4455                 break;
4456         case AMD_RESET_METHOD_MODE2:
4457                 adev->mp1_state = PP_MP1_STATE_RESET;
4458                 break;
4459         default:
4460                 adev->mp1_state = PP_MP1_STATE_NONE;
4461                 break;
4462         }
4463
4464         return true;
4465 }
4466
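/**
 * amdgpu_device_unlock_adev - release the per-device reset lock
 *
 * @adev: amdgpu_device pointer
 *
 * Flushes pending VF error messages, restores the MP1 state, clears the
 * in_gpu_reset flag and releases reset_sem.
 */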
4467 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4468 {
4469         amdgpu_vf_error_trans_all(adev);
4470         adev->mp1_state = PP_MP1_STATE_NONE;
4471         atomic_set(&adev->in_gpu_reset, 0);
4472         up_write(&adev->reset_sem);
4473 }
4474
4475 /*
4476  * Lock a list of amdgpu devices in a hive safely. If this is not a hive
4477  * with multiple nodes, it behaves like amdgpu_device_lock_adev.
4478  *
4479  * Unlocking won't require a roll back.
4480  */
4481 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4482 {
4483         struct amdgpu_device *tmp_adev = NULL;
4484
4485         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4486                 if (!hive) {
4487                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4488                         return -ENODEV;
4489                 }
4490                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4491                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4492                                 goto roll_back;
4493                 }
4494         } else if (!amdgpu_device_lock_adev(adev, hive))
4495                 return -EAGAIN;
4496
4497         return 0;
4498 roll_back:
4499         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4500                 /*
4501                  * If the lock iteration broke off in the middle of a hive,
4502                  * it may mean there is a race issue,
4503                  * or that a hive device locked up independently.
4504                  * We may or may not be in trouble, so try to roll back
4505                  * the locks and give out a warning.
4506                  */
4507                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4508                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4509                         amdgpu_device_unlock_adev(tmp_adev);
4510                 }
4511         }
4512         return -EAGAIN;
4513 }
4514
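/**
 * amdgpu_device_resume_display_audio - re-enable runtime PM for the audio function
 *
 * @adev: amdgpu_device pointer
 *
 * Looks up the audio controller on function 1 of the GPU and re-enables and
 * resumes its runtime PM, undoing amdgpu_device_suspend_display_audio().
 */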
4515 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4516 {
4517         struct pci_dev *p = NULL;
4518
4519         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4520                         adev->pdev->bus->number, 1);
4521         if (p) {
4522                 pm_runtime_enable(&(p->dev));
4523                 pm_runtime_resume(&(p->dev));
4524         }
4525 }
4526
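/**
 * amdgpu_device_suspend_display_audio - runtime suspend the audio function
 *
 * @adev: amdgpu_device pointer
 *
 * For BACO and mode1 resets, tries to put the audio controller on function 1
 * into runtime suspend before the GPU reset starts and then disables its
 * runtime PM. Returns 0 on success, -EINVAL if the reset method does not
 * need this, -ENODEV if no audio function was found, or -ETIMEDOUT if the
 * controller did not suspend in time.
 */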
4527 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4528 {
4529         enum amd_reset_method reset_method;
4530         struct pci_dev *p = NULL;
4531         u64 expires;
4532
4533         /*
4534          * For now, only BACO and mode1 reset are confirmed to suffer
4535          * from the audio issue if the audio device is not properly suspended.
4536          */
4537         reset_method = amdgpu_asic_reset_method(adev);
4538         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4539              (reset_method != AMD_RESET_METHOD_MODE1))
4540                 return -EINVAL;
4541
4542         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4543                         adev->pdev->bus->number, 1);
4544         if (!p)
4545                 return -ENODEV;
4546
4547         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4548         if (!expires)
4549                 /*
4550                  * If we cannot get the audio device autosuspend delay,
4551                  * a fixed 4s interval will be used. Since 3s is the
4552                  * audio controller's default autosuspend delay setting,
4553                  * the 4s used here is guaranteed to cover that.
4554                  */
4555                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4556
4557         while (!pm_runtime_status_suspended(&(p->dev))) {
4558                 if (!pm_runtime_suspend(&(p->dev)))
4559                         break;
4560
4561                 if (expires < ktime_get_mono_fast_ns()) {
4562                         dev_warn(adev->dev, "failed to suspend display audio\n");
4563                         /* TODO: abort the succeeding gpu reset? */
4564                         return -ETIMEDOUT;
4565                 }
4566         }
4567
4568         pm_runtime_disable(&(p->dev));
4569
4570         return 0;
4571 }
4572
4573 /**
4574  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4575  *
4576  * @adev: amdgpu_device pointer
4577  * @job: the job that triggered the hang, or NULL if none
4578  *
4579  * Attempt to reset the GPU if it has hung (all ASICs).
4580  * Attempt a soft reset or a full reset and reinitialize the ASIC.
4581  * Returns 0 for success or an error on failure.
4582  */
4583
4584 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4585                               struct amdgpu_job *job)
4586 {
4587         struct list_head device_list, *device_list_handle =  NULL;
4588         bool need_full_reset = false;
4589         bool job_signaled = false;
4590         struct amdgpu_hive_info *hive = NULL;
4591         struct amdgpu_device *tmp_adev = NULL;
4592         int i, r = 0;
4593         bool need_emergency_restart = false;
4594         bool audio_suspended = false;
4595
4596         /*
4597          * Special case: RAS triggered and full reset isn't supported
4598          */
4599         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4600
4601         /*
4602          * Flush RAM to disk so that after reboot
4603          * the user can read the log and see why the system rebooted.
4604          */
4605         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4606                 DRM_WARN("Emergency reboot.");
4607
4608                 ksys_sync_helper();
4609                 emergency_restart();
4610         }
4611
4612         dev_info(adev->dev, "GPU %s begin!\n",
4613                 need_emergency_restart ? "jobs stop":"reset");
4614
4615         /*
4616          * Here we trylock to avoid a chain of resets executing, triggered
4617          * either by jobs on different adevs in an XGMI hive or by jobs on
4618          * different schedulers for the same device, while this timeout handler is running.
4619          * We always reset all schedulers for a device and all devices in an XGMI
4620          * hive, so that should take care of them too.
4621          */
4622         hive = amdgpu_get_xgmi_hive(adev);
4623         if (hive) {
4624                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4625                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4626                                 job ? job->base.id : -1, hive->hive_id);
4627                         amdgpu_put_xgmi_hive(hive);
4628                         if (job)
4629                                 drm_sched_increase_karma(&job->base);
4630                         return 0;
4631                 }
4632                 mutex_lock(&hive->hive_lock);
4633         }
4634
4635         /*
4636          * Lock the device before we try to operate on the linked list.
4637          * If we didn't get the device lock, don't touch the linked list since
4638          * others may be iterating it.
4639          */
4640         r = amdgpu_device_lock_hive_adev(adev, hive);
4641         if (r) {
4642                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4643                                         job ? job->base.id : -1);
4644
4645                 /* even though we skipped this reset, we still need to mark the job as guilty */
4646                 if (job)
4647                         drm_sched_increase_karma(&job->base);
4648                 goto skip_recovery;
4649         }
4650
4651         /*
4652          * Build list of devices to reset.
4653          * In case we are in XGMI hive mode, resort the device list
4654          * to put adev in the 1st position.
4655          */
4656         INIT_LIST_HEAD(&device_list);
4657         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4658                 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4659                         list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4660                 device_list_handle = &hive->device_list;
4661         } else {
4662                 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4663                 device_list_handle = &device_list;
4664         }
4665
4666         /* block all schedulers and reset given job's ring */
4667         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4668                 /*
4669                  * Try to put the audio codec into suspend state
4670                  * before the gpu reset starts.
4671                  *
4672                  * Because the power domain of the graphics device
4673                  * is shared with the AZ power domain, without this
4674                  * we may change the audio hardware from behind
4675                  * the audio driver's back. That will trigger
4676                  * some audio codec errors.
4677                  */
4678                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4679                         audio_suspended = true;
4680
4681                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4682
4683                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4684
4685                 if (!amdgpu_sriov_vf(tmp_adev))
4686                         amdgpu_amdkfd_pre_reset(tmp_adev);
4687
4688                 /*
4689                  * Mark the ASICs to be reset as untracked first,
4690                  * and add them back after the reset completes.
4691                  */
4692                 amdgpu_unregister_gpu_instance(tmp_adev);
4693
4694                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4695
4696                 /* disable ras on ALL IPs */
4697                 if (!need_emergency_restart &&
4698                       amdgpu_device_ip_need_full_reset(tmp_adev))
4699                         amdgpu_ras_suspend(tmp_adev);
4700
4701                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4702                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4703
4704                         if (!ring || !ring->sched.thread)
4705                                 continue;
4706
4707                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4708
4709                         if (need_emergency_restart)
4710                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4711                 }
4712         }
4713
4714         if (need_emergency_restart)
4715                 goto skip_sched_resume;
4716
4717         /*
4718          * Must check guilty signal here since after this point all old
4719          * HW fences are force signaled.
4720          *
4721          * job->base holds a reference to parent fence
4722          */
4723         if (job && job->base.s_fence->parent &&
4724             dma_fence_is_signaled(job->base.s_fence->parent)) {
4725                 job_signaled = true;
4726                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4727                 goto skip_hw_reset;
4728         }
4729
4730 retry:  /* Pre-ASIC reset for the rest of the adevs in the XGMI hive. */
4731         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4732                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4733                                                  (tmp_adev == adev) ? job : NULL,
4734                                                  &need_full_reset);
4735                 /* TODO: Should we stop? */
4736                 if (r) {
4737                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s\n",
4738                                   r, adev_to_drm(tmp_adev)->unique);
4739                         tmp_adev->asic_reset_res = r;
4740                 }
4741         }
4742
4743         /* Actual ASIC resets if needed. */
4744         /* TODO Implement XGMI hive reset logic for SRIOV */
4745         if (amdgpu_sriov_vf(adev)) {
4746                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4747                 if (r)
4748                         adev->asic_reset_res = r;
4749         } else {
4750                 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
4751                 if (r == -EAGAIN)
4752                         goto retry;
4753         }
4754
4755 skip_hw_reset:
4756
4757         /* Post ASIC reset for all devs. */
4758         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4759
4760                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4761                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4762
4763                         if (!ring || !ring->sched.thread)
4764                                 continue;
4765
4766                         /* No point in resubmitting jobs if we didn't HW reset */
4767                         if (!tmp_adev->asic_reset_res && !job_signaled)
4768                                 drm_sched_resubmit_jobs(&ring->sched);
4769
4770                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4771                 }
4772
4773                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4774                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4775                 }
4776
4777                 tmp_adev->asic_reset_res = 0;
4778
4779                 if (r) {
4780                         /* bad news, how to tell it to userspace ? */
4781                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4782                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4783                 } else {
4784                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4785                 }
4786         }
4787
4788 skip_sched_resume:
4789         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4790                 /* unlock kfd: SRIOV would do it separately */
4791                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4792                         amdgpu_amdkfd_post_reset(tmp_adev);
4793                 if (audio_suspended)
4794                         amdgpu_device_resume_display_audio(tmp_adev);
4795                 amdgpu_device_unlock_adev(tmp_adev);
4796         }
4797
4798 skip_recovery:
4799         if (hive) {
4800                 atomic_set(&hive->in_reset, 0);
4801                 mutex_unlock(&hive->hive_lock);
4802                 amdgpu_put_xgmi_hive(hive);
4803         }
4804
4805         if (r && r != -EAGAIN)
4806                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4807         return r;
4808 }
4809
4810 /**
4811  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
4812  *
4813  * @adev: amdgpu_device pointer
4814  *
4815  * Fetches and stores in the driver the PCIe capabilities (gen speed
4816  * and lanes) of the slot the device is in. Handles APUs and
4817  * virtualized environments where PCIe config space may not be available.
4818  */
4819 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4820 {
4821         struct pci_dev *pdev;
4822         enum pci_bus_speed speed_cap, platform_speed_cap;
4823         enum pcie_link_width platform_link_width;
4824
4825         if (amdgpu_pcie_gen_cap)
4826                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4827
4828         if (amdgpu_pcie_lane_cap)
4829                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4830
4831         /* covers APUs as well */
4832         if (pci_is_root_bus(adev->pdev->bus)) {
4833                 if (adev->pm.pcie_gen_mask == 0)
4834                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4835                 if (adev->pm.pcie_mlw_mask == 0)
4836                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4837                 return;
4838         }
4839
4840         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4841                 return;
4842
4843         pcie_bandwidth_available(adev->pdev, NULL,
4844                                  &platform_speed_cap, &platform_link_width);
4845
4846         if (adev->pm.pcie_gen_mask == 0) {
4847                 /* asic caps */
4848                 pdev = adev->pdev;
4849                 speed_cap = pcie_get_speed_cap(pdev);
4850                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4851                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4852                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4853                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4854                 } else {
4855                         if (speed_cap == PCIE_SPEED_32_0GT)
4856                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4857                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4858                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4859                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4860                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
4861                         else if (speed_cap == PCIE_SPEED_16_0GT)
4862                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4863                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4864                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4865                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4866                         else if (speed_cap == PCIE_SPEED_8_0GT)
4867                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4868                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4869                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4870                         else if (speed_cap == PCIE_SPEED_5_0GT)
4871                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4872                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4873                         else
4874                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4875                 }
4876                 /* platform caps */
4877                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4878                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4879                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4880                 } else {
4881                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
4882                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4883                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4884                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4885                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4886                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
4887                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
4888                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4889                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4890                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4891                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4892                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4893                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4894                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4895                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4896                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4897                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4898                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4899                         else
4900                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4901
4902                 }
4903         }
4904         if (adev->pm.pcie_mlw_mask == 0) {
4905                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4906                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4907                 } else {
4908                         switch (platform_link_width) {
4909                         case PCIE_LNK_X32:
4910                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4911                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4912                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4913                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4914                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4915                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4916                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4917                                 break;
4918                         case PCIE_LNK_X16:
4919                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4920                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4921                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4922                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4923                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4924                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4925                                 break;
4926                         case PCIE_LNK_X12:
4927                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4928                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4929                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4930                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4931                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4932                                 break;
4933                         case PCIE_LNK_X8:
4934                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4935                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4936                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4937                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4938                                 break;
4939                         case PCIE_LNK_X4:
4940                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4941                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4942                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4943                                 break;
4944                         case PCIE_LNK_X2:
4945                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4946                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4947                                 break;
4948                         case PCIE_LNK_X1:
4949                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4950                                 break;
4951                         default:
4952                                 break;
4953                         }
4954                 }
4955         }
4956 }
4957
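/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables the doorbell interrupt when RAS is supported and asks the SMU
 * to put the ASIC into the BACO state. Returns -ENOTSUPP if the device
 * does not support BACO.
 */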
4958 int amdgpu_device_baco_enter(struct drm_device *dev)
4959 {
4960         struct amdgpu_device *adev = drm_to_adev(dev);
4961         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4962
4963         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4964                 return -ENOTSUPP;
4965
4966         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4967                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4968
4969         return amdgpu_dpm_baco_enter(adev);
4970 }
4971
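/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Asks the SMU to bring the ASIC out of BACO and re-enables the doorbell
 * interrupt when RAS is supported. Returns -ENOTSUPP if the device does
 * not support BACO, or the error returned by the BACO exit.
 */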
4972 int amdgpu_device_baco_exit(struct drm_device *dev)
4973 {
4974         struct amdgpu_device *adev = drm_to_adev(dev);
4975         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4976         int ret = 0;
4977
4978         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4979                 return -ENOTSUPP;
4980
4981         ret = amdgpu_dpm_baco_exit(adev);
4982         if (ret)
4983                 return ret;
4984
4985         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4986                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4987
4988         return 0;
4989 }
4990
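/**
 * amdgpu_cancel_all_tdr - cancel the pending timeout work on all rings
 *
 * @adev: amdgpu_device pointer
 *
 * Cancels and waits for the scheduler timeout (TDR) work of every active
 * ring so that no job timeout handler races with PCI error recovery.
 */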
4991 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
4992 {
4993         int i;
4994
4995         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4996                 struct amdgpu_ring *ring = adev->rings[i];
4997
4998                 if (!ring || !ring->sched.thread)
4999                         continue;
5000
5001                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5002         }
5003 }
5004
5005 /**
5006  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5007  * @pdev: PCI device struct
5008  * @state: PCI channel state
5009  *
5010  * Description: Called when a PCI error is detected.
5011  *
5012  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5013  */
5014 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5015 {
5016         struct drm_device *dev = pci_get_drvdata(pdev);
5017         struct amdgpu_device *adev = drm_to_adev(dev);
5018         int i;
5019
5020         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5021
5022         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5023                 DRM_WARN("No support for XGMI hive yet...");
5024                 return PCI_ERS_RESULT_DISCONNECT;
5025         }
5026
5027         switch (state) {
5028         case pci_channel_io_normal:
5029                 return PCI_ERS_RESULT_CAN_RECOVER;
5030         /* Fatal error, prepare for slot reset */
5031         case pci_channel_io_frozen:
5032                 /*
5033                  * Cancel and wait for all TDRs in progress if failing to
5034                  * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5035                  *
5036                  * Locking adev->reset_sem will prevent any external access
5037                  * to the GPU during PCI error recovery.
5038                  */
5039                 while (!amdgpu_device_lock_adev(adev, NULL))
5040                         amdgpu_cancel_all_tdr(adev);
5041
5042                 /*
5043                  * Block any work scheduling as we do for regular GPU reset
5044                  * for the duration of the recovery
5045                  */
5046                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5047                         struct amdgpu_ring *ring = adev->rings[i];
5048
5049                         if (!ring || !ring->sched.thread)
5050                                 continue;
5051
5052                         drm_sched_stop(&ring->sched, NULL);
5053                 }
5054                 return PCI_ERS_RESULT_NEED_RESET;
5055         case pci_channel_io_perm_failure:
5056                 /* Permanent error, prepare for device removal */
5057                 return PCI_ERS_RESULT_DISCONNECT;
5058         }
5059
5060         return PCI_ERS_RESULT_NEED_RESET;
5061 }
5062
5063 /**
5064  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5065  * @pdev: pointer to PCI device
5066  */
5067 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5068 {
5069
5070         DRM_INFO("PCI error: mmio enabled callback!!\n");
5071
5072         /* TODO - dump whatever for debugging purposes */
5073
5074         /* This is called only if amdgpu_pci_error_detected returns
5075          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5076          * works, so there is no need to reset the slot.
5077          */
5078
5079         return PCI_ERS_RESULT_RECOVERED;
5080 }
5081
5082 /**
5083  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5084  * @pdev: PCI device struct
5085  *
5086  * Description: This routine is called by the pci error recovery
5087  * code after the PCI slot has been reset, just before we
5088  * should resume normal operations.
5089  */
5090 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5091 {
5092         struct drm_device *dev = pci_get_drvdata(pdev);
5093         struct amdgpu_device *adev = drm_to_adev(dev);
5094         int r, i;
5095         bool need_full_reset = true;
5096         u32 memsize;
5097         struct list_head device_list;
5098
5099         DRM_INFO("PCI error: slot reset callback!!\n");
5100
5101         INIT_LIST_HEAD(&device_list);
5102         list_add_tail(&adev->gmc.xgmi.head, &device_list);
5103
5104         /* wait for asic to come out of reset */
5105         msleep(500);
5106
5107         /* Restore PCI config space */
5108         amdgpu_device_load_pci_state(pdev);
5109
5110         /* confirm the ASIC came out of reset */
5111         for (i = 0; i < adev->usec_timeout; i++) {
5112                 memsize = amdgpu_asic_get_config_memsize(adev);
5113
5114                 if (memsize != 0xffffffff)
5115                         break;
5116                 udelay(1);
5117         }
5118         if (memsize == 0xffffffff) {
5119                 r = -ETIME;
5120                 goto out;
5121         }
5122
5123         adev->in_pci_err_recovery = true;
5124         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
5125         adev->in_pci_err_recovery = false;
5126         if (r)
5127                 goto out;
5128
5129         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
5130
5131 out:
5132         if (!r) {
5133                 if (amdgpu_device_cache_pci_state(adev->pdev))
5134                         pci_restore_state(adev->pdev);
5135
5136                 DRM_INFO("PCIe error recovery succeeded\n");
5137         } else {
5138                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5139                 amdgpu_device_unlock_adev(adev);
5140         }
5141
5142         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5143 }
5144
5145 /**
5146  * amdgpu_pci_resume() - resume normal ops after PCI reset
5147  * @pdev: pointer to PCI device
5148  *
5149  * Called when the error recovery driver tells us that it is
5150  * OK to resume normal operation.
5151  */
5152 void amdgpu_pci_resume(struct pci_dev *pdev)
5153 {
5154         struct drm_device *dev = pci_get_drvdata(pdev);
5155         struct amdgpu_device *adev = drm_to_adev(dev);
5156         int i;
5157
5158
5159         DRM_INFO("PCI error: resume callback!!\n");
5160
5161         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5162                 struct amdgpu_ring *ring = adev->rings[i];
5163
5164                 if (!ring || !ring->sched.thread)
5165                         continue;
5166
5167
5168                 drm_sched_resubmit_jobs(&ring->sched);
5169                 drm_sched_start(&ring->sched, true);
5170         }
5171
5172         amdgpu_device_unlock_adev(adev);
5173 }
5174
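/**
 * amdgpu_device_cache_pci_state - save the current PCI config space
 *
 * @pdev: PCI device struct
 *
 * Saves the PCI config space and caches it in adev->pci_state so that it
 * can be restored after a PCI reset. Returns true on success.
 */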
5175 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5176 {
5177         struct drm_device *dev = pci_get_drvdata(pdev);
5178         struct amdgpu_device *adev = drm_to_adev(dev);
5179         int r;
5180
5181         r = pci_save_state(pdev);
5182         if (!r) {
5183                 kfree(adev->pci_state);
5184
5185                 adev->pci_state = pci_store_saved_state(pdev);
5186
5187                 if (!adev->pci_state) {
5188                         DRM_ERROR("Failed to store PCI saved state");
5189                         return false;
5190                 }
5191         } else {
5192                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5193                 return false;
5194         }
5195
5196         return true;
5197 }
5198
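/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Loads the previously cached adev->pci_state back into the device and
 * restores the PCI config space. Returns true on success, false if no
 * state was cached or the load failed.
 */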
5199 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5200 {
5201         struct drm_device *dev = pci_get_drvdata(pdev);
5202         struct amdgpu_device *adev = drm_to_adev(dev);
5203         int r;
5204
5205         if (!adev->pci_state)
5206                 return false;
5207
5208         r = pci_load_saved_state(pdev, adev->pci_state);
5209
5210         if (!r) {
5211                 pci_restore_state(pdev);
5212         } else {
5213                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5214                 return false;
5215         }
5216
5217         return true;
5218 }
5219
5220