1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68
69 #include <linux/suspend.h>
70 #include <drm/task_barrier.h>
71 #include <linux/pm_runtime.h>
72
73 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
85
86 #define AMDGPU_RESUME_MS                2000
87
88 const char *amdgpu_asic_name[] = {
89         "TAHITI",
90         "PITCAIRN",
91         "VERDE",
92         "OLAND",
93         "HAINAN",
94         "BONAIRE",
95         "KAVERI",
96         "KABINI",
97         "HAWAII",
98         "MULLINS",
99         "TOPAZ",
100         "TONGA",
101         "FIJI",
102         "CARRIZO",
103         "STONEY",
104         "POLARIS10",
105         "POLARIS11",
106         "POLARIS12",
107         "VEGAM",
108         "VEGA10",
109         "VEGA12",
110         "VEGA20",
111         "RAVEN",
112         "ARCTURUS",
113         "RENOIR",
114         "NAVI10",
115         "NAVI14",
116         "NAVI12",
117         "SIENNA_CICHLID",
118         "NAVY_FLOUNDER",
119         "VANGOGH",
120         "DIMGREY_CAVEFISH",
121         "LAST",
122 };
123
124 /**
125  * DOC: pcie_replay_count
126  *
127  * The amdgpu driver provides a sysfs API for reporting the total number
128  * of PCIe replays (NAKs).
129  * The file pcie_replay_count is used for this and returns the total
130  * number of replays as a sum of the NAKs generated and NAKs received.
131  */
132
133 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
134                 struct device_attribute *attr, char *buf)
135 {
136         struct drm_device *ddev = dev_get_drvdata(dev);
137         struct amdgpu_device *adev = drm_to_adev(ddev);
138         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
139
140         return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
141 }
142
143 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
144                 amdgpu_device_get_pcie_replay_count, NULL);
145
146 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
147
148 /**
149  * DOC: product_name
150  *
151  * The amdgpu driver provides a sysfs API for reporting the product name
152  * for the device.
153  * The file product_name is used for this and returns the product name
154  * as returned from the FRU.
155  * NOTE: This is only available for certain server cards
156  */
157
158 static ssize_t amdgpu_device_get_product_name(struct device *dev,
159                 struct device_attribute *attr, char *buf)
160 {
161         struct drm_device *ddev = dev_get_drvdata(dev);
162         struct amdgpu_device *adev = drm_to_adev(ddev);
163
164         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
165 }
166
167 static DEVICE_ATTR(product_name, S_IRUGO,
168                 amdgpu_device_get_product_name, NULL);
169
170 /**
171  * DOC: product_number
172  *
173  * The amdgpu driver provides a sysfs API for reporting the part number
174  * for the device.
175  * The file product_number is used for this and returns the part number
176  * as returned from the FRU.
177  * NOTE: This is only available for certain server cards
178  */
179
180 static ssize_t amdgpu_device_get_product_number(struct device *dev,
181                 struct device_attribute *attr, char *buf)
182 {
183         struct drm_device *ddev = dev_get_drvdata(dev);
184         struct amdgpu_device *adev = drm_to_adev(ddev);
185
186         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
187 }
188
189 static DEVICE_ATTR(product_number, S_IRUGO,
190                 amdgpu_device_get_product_number, NULL);
191
192 /**
193  * DOC: serial_number
194  *
195  * The amdgpu driver provides a sysfs API for reporting the serial number
196  * for the device.
197  * The file serial_number is used for this and returns the serial number
198  * as returned from the FRU.
199  * NOTE: This is only available for certain server cards
200  */
201
202 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
203                 struct device_attribute *attr, char *buf)
204 {
205         struct drm_device *ddev = dev_get_drvdata(dev);
206         struct amdgpu_device *adev = drm_to_adev(ddev);
207
208         return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
209 }
210
211 static DEVICE_ATTR(serial_number, S_IRUGO,
212                 amdgpu_device_get_serial_number, NULL);
213
214 /**
215  * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
216  *
217  * @dev: drm_device pointer
218  *
219  * Returns true if the device is a dGPU with HG/PX power control,
220  * otherwise return false.
221  */
222 bool amdgpu_device_supports_atpx(struct drm_device *dev)
223 {
224         struct amdgpu_device *adev = drm_to_adev(dev);
225
226         if (adev->flags & AMD_IS_PX)
227                 return true;
228         return false;
229 }
230
231 /**
232  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
233  *
234  * @dev: drm_device pointer
235  *
236  * Returns true if the device is a dGPU with ACPI power resources (PR3),
237  * otherwise return false.
238  */
239 bool amdgpu_device_supports_boco(struct drm_device *dev)
240 {
241         struct amdgpu_device *adev = drm_to_adev(dev);
242
243         if (adev->has_pr3)
244                 return true;
245         return false;
246 }
247
248 /**
249  * amdgpu_device_supports_baco - Does the device support BACO
250  *
251  * @dev: drm_device pointer
252  *
253  * Returns true if the device supports BACO,
254  * otherwise return false.
255  */
256 bool amdgpu_device_supports_baco(struct drm_device *dev)
257 {
258         struct amdgpu_device *adev = drm_to_adev(dev);
259
260         return amdgpu_asic_supports_baco(adev);
261 }
262
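/*
 * Illustrative sketch (not part of the driver): a caller holding a
 * struct drm_device pointer could pick a runtime power-off strategy
 * from the three helpers above.  The enum and variable names here are
 * hypothetical.
 *
 *	enum { RPM_NONE, RPM_ATPX, RPM_BOCO, RPM_BACO } rpm_mode = RPM_NONE;
 *
 *	if (amdgpu_device_supports_atpx(ddev))
 *		rpm_mode = RPM_ATPX;
 *	else if (amdgpu_device_supports_boco(ddev))
 *		rpm_mode = RPM_BOCO;
 *	else if (amdgpu_device_supports_baco(ddev))
 *		rpm_mode = RPM_BACO;
 */
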
263 /*
264  * VRAM access helper functions
265  */
266
267 /**
268  * amdgpu_device_vram_access - read/write a buffer in vram
269  *
270  * @adev: amdgpu_device pointer
271  * @pos: offset of the buffer in vram
272  * @buf: virtual address of the buffer in system memory
273  * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
274  * @write: true - write to vram, otherwise - read from vram
275  */
276 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
277                                uint32_t *buf, size_t size, bool write)
278 {
279         unsigned long flags;
280         uint32_t hi = ~0;
281         uint64_t last;
282
283
284 #ifdef CONFIG_64BIT
285         last = min(pos + size, adev->gmc.visible_vram_size);
286         if (last > pos) {
287                 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
288                 size_t count = last - pos;
289
290                 if (write) {
291                         memcpy_toio(addr, buf, count);
292                         mb();
293                         amdgpu_asic_flush_hdp(adev, NULL);
294                 } else {
295                         amdgpu_asic_invalidate_hdp(adev, NULL);
296                         mb();
297                         memcpy_fromio(buf, addr, count);
298                 }
299
300                 if (count == size)
301                         return;
302
303                 pos += count;
304                 buf += count / 4;
305                 size -= count;
306         }
307 #endif
308
309         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
310         for (last = pos + size; pos < last; pos += 4) {
311                 uint32_t tmp = pos >> 31;
312
313                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
314                 if (tmp != hi) {
315                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
316                         hi = tmp;
317                 }
318                 if (write)
319                         WREG32_NO_KIQ(mmMM_DATA, *buf++);
320                 else
321                         *buf++ = RREG32_NO_KIQ(mmMM_DATA);
322         }
323         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
324 }
325
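/*
 * Illustrative sketch (not part of the driver): reading the first 16
 * dwords of VRAM into a stack buffer with the helper above.  The caller
 * context and the buffer are hypothetical.
 *
 *	uint32_t data[16];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 *	// data[] now holds the dwords read from the start of VRAM
 */
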
326 /*
327  * register access helper functions.
328  */
329 /**
330  * amdgpu_device_rreg - read a memory mapped IO or indirect register
331  *
332  * @adev: amdgpu_device pointer
333  * @reg: dword aligned register offset
334  * @acc_flags: access flags which require special behavior
335  *
336  * Returns the 32 bit value from the offset specified.
337  */
338 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
339                             uint32_t reg, uint32_t acc_flags)
340 {
341         uint32_t ret;
342
343         if (adev->in_pci_err_recovery)
344                 return 0;
345
346         if ((reg * 4) < adev->rmmio_size) {
347                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
348                     amdgpu_sriov_runtime(adev) &&
349                     down_read_trylock(&adev->reset_sem)) {
350                         ret = amdgpu_kiq_rreg(adev, reg);
351                         up_read(&adev->reset_sem);
352                 } else {
353                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
354                 }
355         } else {
356                 ret = adev->pcie_rreg(adev, reg * 4);
357         }
358
359         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
360
361         return ret;
362 }
363
364 /*
365  * MMIO register byte read helper function
366  * @offset: byte offset from MMIO start
367  *
368  */
369
370 /**
371  * amdgpu_mm_rreg8 - read a memory mapped IO register
372  *
373  * @adev: amdgpu_device pointer
374  * @offset: byte aligned register offset
375  *
376  * Returns the 8 bit value from the offset specified.
377  */
378 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
379 {
380         if (adev->in_pci_err_recovery)
381                 return 0;
382
383         if (offset < adev->rmmio_size)
384                 return (readb(adev->rmmio + offset));
385         BUG();
386 }
387
388 /*
389  * MMIO register byte write helper function
390  * @offset: byte offset from MMIO start
391  * @value: the value to be written to the register
392  *
393  */
394 /**
395  * amdgpu_mm_wreg8 - write to a memory mapped IO register
396  *
397  * @adev: amdgpu_device pointer
398  * @offset: byte aligned register offset
399  * @value: 8 bit value to write
400  *
401  * Writes the value specified to the offset specified.
402  */
403 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
404 {
405         if (adev->in_pci_err_recovery)
406                 return;
407
408         if (offset < adev->rmmio_size)
409                 writeb(value, adev->rmmio + offset);
410         else
411                 BUG();
412 }
413
414 /**
415  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
416  *
417  * @adev: amdgpu_device pointer
418  * @reg: dword aligned register offset
419  * @v: 32 bit value to write to the register
420  * @acc_flags: access flags which require special behavior
421  *
422  * Writes the value specified to the offset specified.
423  */
424 void amdgpu_device_wreg(struct amdgpu_device *adev,
425                         uint32_t reg, uint32_t v,
426                         uint32_t acc_flags)
427 {
428         if (adev->in_pci_err_recovery)
429                 return;
430
431         if ((reg * 4) < adev->rmmio_size) {
432                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
433                     amdgpu_sriov_runtime(adev) &&
434                     down_read_trylock(&adev->reset_sem)) {
435                         amdgpu_kiq_wreg(adev, reg, v);
436                         up_read(&adev->reset_sem);
437                 } else {
438                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
439                 }
440         } else {
441                 adev->pcie_wreg(adev, reg * 4, v);
442         }
443
444         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
445 }
446
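/*
 * Illustrative sketch (not part of the driver): a read-modify-write of a
 * register through the helpers above.  The register offset and bit mask
 * are hypothetical; real code normally goes through the RREG32/WREG32
 * macros, which are assumed to wrap these functions.
 *
 *	uint32_t val = amdgpu_device_rreg(adev, reg_offset, 0);
 *
 *	val |= some_enable_bit;
 *	amdgpu_device_wreg(adev, reg_offset, val, 0);
 */
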
447 /*
448  * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if in range
449  *
450  * This function is invoked only for debugfs register access.
451  */
452 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
453                              uint32_t reg, uint32_t v)
454 {
455         if (adev->in_pci_err_recovery)
456                 return;
457
458         if (amdgpu_sriov_fullaccess(adev) &&
459             adev->gfx.rlc.funcs &&
460             adev->gfx.rlc.funcs->is_rlcg_access_range) {
461                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
462                         return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
463         } else {
464                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
465         }
466 }
467
468 /**
469  * amdgpu_io_rreg - read an IO register
470  *
471  * @adev: amdgpu_device pointer
472  * @reg: dword aligned register offset
473  *
474  * Returns the 32 bit value from the offset specified.
475  */
476 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
477 {
478         if (adev->in_pci_err_recovery)
479                 return 0;
480
481         if ((reg * 4) < adev->rio_mem_size)
482                 return ioread32(adev->rio_mem + (reg * 4));
483         else {
484                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
485                 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
486         }
487 }
488
489 /**
490  * amdgpu_io_wreg - write to an IO register
491  *
492  * @adev: amdgpu_device pointer
493  * @reg: dword aligned register offset
494  * @v: 32 bit value to write to the register
495  *
496  * Writes the value specified to the offset specified.
497  */
498 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
499 {
500         if (adev->in_pci_err_recovery)
501                 return;
502
503         if ((reg * 4) < adev->rio_mem_size)
504                 iowrite32(v, adev->rio_mem + (reg * 4));
505         else {
506                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
507                 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
508         }
509 }
510
511 /**
512  * amdgpu_mm_rdoorbell - read a doorbell dword
513  *
514  * @adev: amdgpu_device pointer
515  * @index: doorbell index
516  *
517  * Returns the value in the doorbell aperture at the
518  * requested doorbell index (CIK).
519  */
520 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
521 {
522         if (adev->in_pci_err_recovery)
523                 return 0;
524
525         if (index < adev->doorbell.num_doorbells) {
526                 return readl(adev->doorbell.ptr + index);
527         } else {
528                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
529                 return 0;
530         }
531 }
532
533 /**
534  * amdgpu_mm_wdoorbell - write a doorbell dword
535  *
536  * @adev: amdgpu_device pointer
537  * @index: doorbell index
538  * @v: value to write
539  *
540  * Writes @v to the doorbell aperture at the
541  * requested doorbell index (CIK).
542  */
543 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
544 {
545         if (adev->in_pci_err_recovery)
546                 return;
547
548         if (index < adev->doorbell.num_doorbells) {
549                 writel(v, adev->doorbell.ptr + index);
550         } else {
551                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
552         }
553 }
554
555 /**
556  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
557  *
558  * @adev: amdgpu_device pointer
559  * @index: doorbell index
560  *
561  * Returns the value in the doorbell aperture at the
562  * requested doorbell index (VEGA10+).
563  */
564 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
565 {
566         if (adev->in_pci_err_recovery)
567                 return 0;
568
569         if (index < adev->doorbell.num_doorbells) {
570                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
571         } else {
572                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
573                 return 0;
574         }
575 }
576
577 /**
578  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
579  *
580  * @adev: amdgpu_device pointer
581  * @index: doorbell index
582  * @v: value to write
583  *
584  * Writes @v to the doorbell aperture at the
585  * requested doorbell index (VEGA10+).
586  */
587 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
588 {
589         if (adev->in_pci_err_recovery)
590                 return;
591
592         if (index < adev->doorbell.num_doorbells) {
593                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
594         } else {
595                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
596         }
597 }
598
599 /**
600  * amdgpu_device_indirect_rreg - read an indirect register
601  *
602  * @adev: amdgpu_device pointer
603  * @pcie_index: mmio register offset of the index register
604  * @pcie_data: mmio register offset of the data register
605  * @reg_addr: indirect register address to read from
606  *
607  * Returns the value of indirect register @reg_addr
608  */
609 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
610                                 u32 pcie_index, u32 pcie_data,
611                                 u32 reg_addr)
612 {
613         unsigned long flags;
614         u32 r;
615         void __iomem *pcie_index_offset;
616         void __iomem *pcie_data_offset;
617
618         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
619         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
620         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
621
622         writel(reg_addr, pcie_index_offset);
623         readl(pcie_index_offset);
624         r = readl(pcie_data_offset);
625         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
626
627         return r;
628 }
629
630 /**
631  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
632  *
633  * @adev: amdgpu_device pointer
634  * @pcie_index: mmio register offset of the index register
635  * @pcie_data: mmio register offset of the data register
636  * @reg_addr: indirect register address to read from
637  *
638  * Returns the value of indirect register @reg_addr
639  */
640 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
641                                   u32 pcie_index, u32 pcie_data,
642                                   u32 reg_addr)
643 {
644         unsigned long flags;
645         u64 r;
646         void __iomem *pcie_index_offset;
647         void __iomem *pcie_data_offset;
648
649         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
650         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
651         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
652
653         /* read low 32 bits */
654         writel(reg_addr, pcie_index_offset);
655         readl(pcie_index_offset);
656         r = readl(pcie_data_offset);
657         /* read high 32 bits */
658         writel(reg_addr + 4, pcie_index_offset);
659         readl(pcie_index_offset);
660         r |= ((u64)readl(pcie_data_offset) << 32);
661         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
662
663         return r;
664 }
665
666 /**
667  * amdgpu_device_indirect_wreg - write an indirect register
668  *
669  * @adev: amdgpu_device pointer
670  * @pcie_index: mmio register offset of the index register
671  * @pcie_data: mmio register offset of the data register
672  * @reg_addr: indirect register offset
673  * @reg_data: indirect register data
674  *
675  */
676 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
677                                  u32 pcie_index, u32 pcie_data,
678                                  u32 reg_addr, u32 reg_data)
679 {
680         unsigned long flags;
681         void __iomem *pcie_index_offset;
682         void __iomem *pcie_data_offset;
683
684         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
685         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
686         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
687
688         writel(reg_addr, pcie_index_offset);
689         readl(pcie_index_offset);
690         writel(reg_data, pcie_data_offset);
691         readl(pcie_data_offset);
692         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
693 }
694
695 /**
696  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
697  *
698  * @adev: amdgpu_device pointer
699  * @pcie_index: mmio register offset of the index register
700  * @pcie_data: mmio register offset of the data register
701  * @reg_addr: indirect register offset
702  * @reg_data: indirect register data
703  *
704  */
705 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
706                                    u32 pcie_index, u32 pcie_data,
707                                    u32 reg_addr, u64 reg_data)
708 {
709         unsigned long flags;
710         void __iomem *pcie_index_offset;
711         void __iomem *pcie_data_offset;
712
713         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
714         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
715         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
716
717         /* write low 32 bits */
718         writel(reg_addr, pcie_index_offset);
719         readl(pcie_index_offset);
720         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
721         readl(pcie_data_offset);
722         /* write high 32 bits */
723         writel(reg_addr + 4, pcie_index_offset);
724         readl(pcie_index_offset);
725         writel((u32)(reg_data >> 32), pcie_data_offset);
726         readl(pcie_data_offset);
727         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
728 }
729
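/*
 * Illustrative sketch (not part of the driver): an ASIC file could back its
 * pcie_rreg/pcie_wreg callbacks with the indirect helpers above.  The
 * function name and the index/data register offsets below are hypothetical
 * placeholders.
 *
 *	static u32 my_asic_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, my_pcie_index_offset,
 *						   my_pcie_data_offset, reg);
 *	}
 */
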
730 /**
731  * amdgpu_invalid_rreg - dummy reg read function
732  *
733  * @adev: amdgpu_device pointer
734  * @reg: offset of register
735  *
736  * Dummy register read function.  Used for register blocks
737  * that certain asics don't have (all asics).
738  * Returns the value in the register.
739  */
740 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
741 {
742         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
743         BUG();
744         return 0;
745 }
746
747 /**
748  * amdgpu_invalid_wreg - dummy reg write function
749  *
750  * @adev: amdgpu_device pointer
751  * @reg: offset of register
752  * @v: value to write to the register
753  *
754  * Dummy register write function.  Used for register blocks
755  * that certain asics don't have (all asics).
756  */
757 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
758 {
759         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
760                   reg, v);
761         BUG();
762 }
763
764 /**
765  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
766  *
767  * @adev: amdgpu_device pointer
768  * @reg: offset of register
769  *
770  * Dummy register read function.  Used for register blocks
771  * that certain asics don't have (all asics).
772  * Returns the value in the register.
773  */
774 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
775 {
776         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
777         BUG();
778         return 0;
779 }
780
781 /**
782  * amdgpu_invalid_wreg64 - dummy reg write function
783  *
784  * @adev: amdgpu_device pointer
785  * @reg: offset of register
786  * @v: value to write to the register
787  *
788  * Dummy register write function.  Used for register blocks
789  * that certain asics don't have (all asics).
790  */
791 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
792 {
793         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
794                   reg, v);
795         BUG();
796 }
797
798 /**
799  * amdgpu_block_invalid_rreg - dummy reg read function
800  *
801  * @adev: amdgpu_device pointer
802  * @block: offset of instance
803  * @reg: offset of register
804  *
805  * Dummy register read function.  Used for register blocks
806  * that certain asics don't have (all asics).
807  * Returns the value in the register.
808  */
809 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
810                                           uint32_t block, uint32_t reg)
811 {
812         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
813                   reg, block);
814         BUG();
815         return 0;
816 }
817
818 /**
819  * amdgpu_block_invalid_wreg - dummy reg write function
820  *
821  * @adev: amdgpu_device pointer
822  * @block: offset of instance
823  * @reg: offset of register
824  * @v: value to write to the register
825  *
826  * Dummy register write function.  Used for register blocks
827  * that certain asics don't have (all asics).
828  */
829 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
830                                       uint32_t block,
831                                       uint32_t reg, uint32_t v)
832 {
833         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
834                   reg, block, v);
835         BUG();
836 }
837
838 /**
839  * amdgpu_device_asic_init - Wrapper for atom asic_init
840  *
841  * @adev: amdgpu_device pointer
842  *
843  * Does any asic specific work and then calls atom asic init.
844  */
845 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
846 {
847         amdgpu_asic_pre_asic_init(adev);
848
849         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
850 }
851
852 /**
853  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
854  *
855  * @adev: amdgpu_device pointer
856  *
857  * Allocates a scratch page of VRAM for use by various things in the
858  * driver.
859  */
860 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
861 {
862         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
863                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
864                                        &adev->vram_scratch.robj,
865                                        &adev->vram_scratch.gpu_addr,
866                                        (void **)&adev->vram_scratch.ptr);
867 }
868
869 /**
870  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
871  *
872  * @adev: amdgpu_device pointer
873  *
874  * Frees the VRAM scratch page.
875  */
876 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
877 {
878         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
879 }
880
881 /**
882  * amdgpu_device_program_register_sequence - program an array of registers.
883  *
884  * @adev: amdgpu_device pointer
885  * @registers: pointer to the register array
886  * @array_size: size of the register array
887  *
888  * Programs an array of registers with AND and OR masks.
889  * This is a helper for setting golden registers.
890  */
891 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
892                                              const u32 *registers,
893                                              const u32 array_size)
894 {
895         u32 tmp, reg, and_mask, or_mask;
896         int i;
897
898         if (array_size % 3)
899                 return;
900
901         for (i = 0; i < array_size; i += 3) {
902                 reg = registers[i + 0];
903                 and_mask = registers[i + 1];
904                 or_mask = registers[i + 2];
905
906                 if (and_mask == 0xffffffff) {
907                         tmp = or_mask;
908                 } else {
909                         tmp = RREG32(reg);
910                         tmp &= ~and_mask;
911                         if (adev->family >= AMDGPU_FAMILY_AI)
912                                 tmp |= (or_mask & and_mask);
913                         else
914                                 tmp |= or_mask;
915                 }
916                 WREG32(reg, tmp);
917         }
918 }
919
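/*
 * Illustrative sketch (not part of the driver): golden register settings are
 * passed as {register, and_mask, or_mask} triplets.  The offsets and masks
 * below are hypothetical placeholders.
 *
 *	static const u32 my_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,	// and_mask == ~0: write or_mask directly
 *		0x5678, 0x0000ff00, 0x00001200,	// clear the and_mask field, then OR in or_mask
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, my_golden_settings,
 *						ARRAY_SIZE(my_golden_settings));
 */
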
920 /**
921  * amdgpu_device_pci_config_reset - reset the GPU
922  *
923  * @adev: amdgpu_device pointer
924  *
925  * Resets the GPU using the pci config reset sequence.
926  * Only applicable to asics prior to vega10.
927  */
928 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
929 {
930         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
931 }
932
933 /*
934  * GPU doorbell aperture helpers function.
935  */
936 /**
937  * amdgpu_device_doorbell_init - Init doorbell driver information.
938  *
939  * @adev: amdgpu_device pointer
940  *
941  * Init doorbell driver information (CIK)
942  * Returns 0 on success, error on failure.
943  */
944 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
945 {
946
947         /* No doorbell on SI hardware generation */
948         if (adev->asic_type < CHIP_BONAIRE) {
949                 adev->doorbell.base = 0;
950                 adev->doorbell.size = 0;
951                 adev->doorbell.num_doorbells = 0;
952                 adev->doorbell.ptr = NULL;
953                 return 0;
954         }
955
956         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
957                 return -EINVAL;
958
959         amdgpu_asic_init_doorbell_index(adev);
960
961         /* doorbell bar mapping */
962         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
963         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
964
965         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
966                                              adev->doorbell_index.max_assignment+1);
967         if (adev->doorbell.num_doorbells == 0)
968                 return -EINVAL;
969
970         /* For Vega, reserve and map two pages on doorbell BAR since SDMA
971          * paging queue doorbells use the second page. The
972          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
973          * doorbells are in the first page. So with paging queue enabled,
974          * the max num_doorbells should be increased by 1 page (0x400 in dwords).
975          */
976         if (adev->asic_type >= CHIP_VEGA10)
977                 adev->doorbell.num_doorbells += 0x400;
978
979         adev->doorbell.ptr = ioremap(adev->doorbell.base,
980                                      adev->doorbell.num_doorbells *
981                                      sizeof(u32));
982         if (adev->doorbell.ptr == NULL)
983                 return -ENOMEM;
984
985         return 0;
986 }
987
988 /**
989  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
990  *
991  * @adev: amdgpu_device pointer
992  *
993  * Tear down doorbell driver information (CIK)
994  */
995 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
996 {
997         iounmap(adev->doorbell.ptr);
998         adev->doorbell.ptr = NULL;
999 }
1000
1001
1002
1003 /*
1004  * amdgpu_device_wb_*()
1005  * Writeback is the method by which the GPU updates special pages in memory
1006  * with the status of certain GPU events (fences, ring pointers,etc.).
1007  */
1008
1009 /**
1010  * amdgpu_device_wb_fini - Disable Writeback and free memory
1011  *
1012  * @adev: amdgpu_device pointer
1013  *
1014  * Disables Writeback and frees the Writeback memory (all asics).
1015  * Used at driver shutdown.
1016  */
1017 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1018 {
1019         if (adev->wb.wb_obj) {
1020                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1021                                       &adev->wb.gpu_addr,
1022                                       (void **)&adev->wb.wb);
1023                 adev->wb.wb_obj = NULL;
1024         }
1025 }
1026
1027 /**
1028  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1029  *
1030  * @adev: amdgpu_device pointer
1031  *
1032  * Initializes writeback and allocates writeback memory (all asics).
1033  * Used at driver startup.
1034  * Returns 0 on success or a negative error code on failure.
1035  */
1036 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1037 {
1038         int r;
1039
1040         if (adev->wb.wb_obj == NULL) {
1041                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1042                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1043                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1044                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1045                                             (void **)&adev->wb.wb);
1046                 if (r) {
1047                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1048                         return r;
1049                 }
1050
1051                 adev->wb.num_wb = AMDGPU_MAX_WB;
1052                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1053
1054                 /* clear wb memory */
1055                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1056         }
1057
1058         return 0;
1059 }
1060
1061 /**
1062  * amdgpu_device_wb_get - Allocate a wb entry
1063  *
1064  * @adev: amdgpu_device pointer
1065  * @wb: wb index
1066  *
1067  * Allocate a wb slot for use by the driver (all asics).
1068  * Returns 0 on success or -EINVAL on failure.
1069  */
1070 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1071 {
1072         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1073
1074         if (offset < adev->wb.num_wb) {
1075                 __set_bit(offset, adev->wb.used);
1076                 *wb = offset << 3; /* convert to dw offset */
1077                 return 0;
1078         } else {
1079                 return -EINVAL;
1080         }
1081 }
1082
1083 /**
1084  * amdgpu_device_wb_free - Free a wb entry
1085  *
1086  * @adev: amdgpu_device pointer
1087  * @wb: wb index
1088  *
1089  * Free a wb slot allocated for use by the driver (all asics)
1090  */
1091 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1092 {
1093         wb >>= 3;
1094         if (wb < adev->wb.num_wb)
1095                 __clear_bit(wb, adev->wb.used);
1096 }
1097
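/*
 * Illustrative sketch (not part of the driver): allocating a writeback slot,
 * deriving its GPU and CPU addresses, and freeing it again.  The local
 * variable names are hypothetical.
 *
 *	u32 wb_index;
 *	u64 wb_gpu_addr;
 *	volatile u32 *wb_cpu_ptr;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb_index)) {
 *		wb_gpu_addr = adev->wb.gpu_addr + wb_index * 4;
 *		wb_cpu_ptr = &adev->wb.wb[wb_index];
 *		// ... hand the GPU address to the hw, poll *wb_cpu_ptr ...
 *		amdgpu_device_wb_free(adev, wb_index);
 *	}
 */
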
1098 /**
1099  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1100  *
1101  * @adev: amdgpu_device pointer
1102  *
1103  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1104  * to fail, but if any of the BARs is not accessible after the resize we abort
1105  * driver loading by returning -ENODEV.
1106  */
1107 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1108 {
1109         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1110         struct pci_bus *root;
1111         struct resource *res;
1112         unsigned i;
1113         u16 cmd;
1114         int r;
1115
1116         /* Bypass for VF */
1117         if (amdgpu_sriov_vf(adev))
1118                 return 0;
1119
1120         /* skip if the bios has already enabled large BAR */
1121         if (adev->gmc.real_vram_size &&
1122             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1123                 return 0;
1124
1125         /* Check if the root BUS has 64bit memory resources */
1126         root = adev->pdev->bus;
1127         while (root->parent)
1128                 root = root->parent;
1129
1130         pci_bus_for_each_resource(root, res, i) {
1131                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1132                     res->start > 0x100000000ull)
1133                         break;
1134         }
1135
1136         /* Trying to resize is pointless without a root hub window above 4GB */
1137         if (!res)
1138                 return 0;
1139
1140         /* Limit the BAR size to what is available */
1141         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1142                         rbar_size);
1143
1144         /* Disable memory decoding while we change the BAR addresses and size */
1145         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1146         pci_write_config_word(adev->pdev, PCI_COMMAND,
1147                               cmd & ~PCI_COMMAND_MEMORY);
1148
1149         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1150         amdgpu_device_doorbell_fini(adev);
1151         if (adev->asic_type >= CHIP_BONAIRE)
1152                 pci_release_resource(adev->pdev, 2);
1153
1154         pci_release_resource(adev->pdev, 0);
1155
1156         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1157         if (r == -ENOSPC)
1158                 DRM_INFO("Not enough PCI address space for a large BAR.");
1159         else if (r && r != -ENOTSUPP)
1160                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1161
1162         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1163
1164         /* When the doorbell or fb BAR isn't available we have no chance of
1165          * using the device.
1166          */
1167         r = amdgpu_device_doorbell_init(adev);
1168         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1169                 return -ENODEV;
1170
1171         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1172
1173         return 0;
1174 }
1175
1176 /*
1177  * GPU helpers function.
1178  */
1179 /**
1180  * amdgpu_device_need_post - check if the hw needs to be posted or not
1181  *
1182  * @adev: amdgpu_device pointer
1183  *
1184  * Check if the asic has been initialized (all asics) at driver startup,
1185  * or whether a post is needed because a hw reset was performed.
1186  * Returns true if post is needed or false if not.
1187  */
1188 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1189 {
1190         uint32_t reg;
1191
1192         if (amdgpu_sriov_vf(adev))
1193                 return false;
1194
1195         if (amdgpu_passthrough(adev)) {
1196                 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM reboot
1197                  * some old SMC firmware still needs the driver to do a vPost or the GPU hangs,
1198                  * while SMC firmware versions above 22.15 don't have this flaw, so we force
1199                  * vPost to be executed for SMC versions below 22.15
1200                  */
1201                 if (adev->asic_type == CHIP_FIJI) {
1202                         int err;
1203                         uint32_t fw_ver;
1204                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1205                         /* force vPost if an error occurred */
1206                         if (err)
1207                                 return true;
1208
1209                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1210                         if (fw_ver < 0x00160e00)
1211                                 return true;
1212                 }
1213         }
1214
1215         if (adev->has_hw_reset) {
1216                 adev->has_hw_reset = false;
1217                 return true;
1218         }
1219
1220         /* bios scratch used on CIK+ */
1221         if (adev->asic_type >= CHIP_BONAIRE)
1222                 return amdgpu_atombios_scratch_need_asic_init(adev);
1223
1224         /* check MEM_SIZE for older asics */
1225         reg = amdgpu_asic_get_config_memsize(adev);
1226
1227         if ((reg != 0) && (reg != 0xffffffff))
1228                 return false;
1229
1230         return true;
1231 }
1232
1233 /* if we get transitioned to only one device, take VGA back */
1234 /**
1235  * amdgpu_device_vga_set_decode - enable/disable vga decode
1236  *
1237  * @cookie: amdgpu_device pointer
1238  * @state: enable/disable vga decode
1239  *
1240  * Enable/disable vga decode (all asics).
1241  * Returns VGA resource flags.
1242  */
1243 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1244 {
1245         struct amdgpu_device *adev = cookie;
1246         amdgpu_asic_set_vga_state(adev, state);
1247         if (state)
1248                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1249                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1250         else
1251                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1252 }
1253
1254 /**
1255  * amdgpu_device_check_block_size - validate the vm block size
1256  *
1257  * @adev: amdgpu_device pointer
1258  *
1259  * Validates the vm block size specified via module parameter.
1260  * The vm block size defines the number of bits in page table versus page directory,
1261  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1262  * page table and the remaining bits are in the page directory.
1263  */
1264 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1265 {
1266         /* defines number of bits in page table versus page directory,
1267          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1268          * page table and the remaining bits are in the page directory */
1269         if (amdgpu_vm_block_size == -1)
1270                 return;
1271
1272         if (amdgpu_vm_block_size < 9) {
1273                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1274                          amdgpu_vm_block_size);
1275                 amdgpu_vm_block_size = -1;
1276         }
1277 }
1278
1279 /**
1280  * amdgpu_device_check_vm_size - validate the vm size
1281  *
1282  * @adev: amdgpu_device pointer
1283  *
1284  * Validates the vm size in GB specified via module parameter.
1285  * The VM size is the size of the GPU virtual memory space in GB.
1286  */
1287 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1288 {
1289         /* no need to check the default value */
1290         if (amdgpu_vm_size == -1)
1291                 return;
1292
1293         if (amdgpu_vm_size < 1) {
1294                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1295                          amdgpu_vm_size);
1296                 amdgpu_vm_size = -1;
1297         }
1298 }
1299
1300 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1301 {
1302         struct sysinfo si;
1303         bool is_os_64 = (sizeof(void *) == 8);
1304         uint64_t total_memory;
1305         uint64_t dram_size_seven_GB = 0x1B8000000;
1306         uint64_t dram_size_three_GB = 0xB8000000;
1307
1308         if (amdgpu_smu_memory_pool_size == 0)
1309                 return;
1310
1311         if (!is_os_64) {
1312                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1313                 goto def_value;
1314         }
1315         si_meminfo(&si);
1316         total_memory = (uint64_t)si.totalram * si.mem_unit;
1317
1318         if ((amdgpu_smu_memory_pool_size == 1) ||
1319                 (amdgpu_smu_memory_pool_size == 2)) {
1320                 if (total_memory < dram_size_three_GB)
1321                         goto def_value1;
1322         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1323                 (amdgpu_smu_memory_pool_size == 8)) {
1324                 if (total_memory < dram_size_seven_GB)
1325                         goto def_value1;
1326         } else {
1327                 DRM_WARN("Smu memory pool size not supported\n");
1328                 goto def_value;
1329         }
1330         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1331
1332         return;
1333
1334 def_value1:
1335         DRM_WARN("No enough system memory\n");
1336 def_value:
1337         adev->pm.smu_prv_buffer_size = 0;
1338 }
1339
1340 /**
1341  * amdgpu_device_check_arguments - validate module params
1342  *
1343  * @adev: amdgpu_device pointer
1344  *
1345  * Validates certain module parameters and updates
1346  * the associated values used by the driver (all asics).
1347  */
1348 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1349 {
1350         if (amdgpu_sched_jobs < 4) {
1351                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1352                          amdgpu_sched_jobs);
1353                 amdgpu_sched_jobs = 4;
1354         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1355                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1356                          amdgpu_sched_jobs);
1357                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1358         }
1359
1360         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1361                 /* gart size must be greater or equal to 32M */
1362                 dev_warn(adev->dev, "gart size (%d) too small\n",
1363                          amdgpu_gart_size);
1364                 amdgpu_gart_size = -1;
1365         }
1366
1367         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1368                 /* gtt size must be greater or equal to 32M */
1369                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1370                                  amdgpu_gtt_size);
1371                 amdgpu_gtt_size = -1;
1372         }
1373
1374         /* valid range is between 4 and 9 inclusive */
1375         if (amdgpu_vm_fragment_size != -1 &&
1376             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1377                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1378                 amdgpu_vm_fragment_size = -1;
1379         }
1380
1381         if (amdgpu_sched_hw_submission < 2) {
1382                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1383                          amdgpu_sched_hw_submission);
1384                 amdgpu_sched_hw_submission = 2;
1385         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1386                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1387                          amdgpu_sched_hw_submission);
1388                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1389         }
1390
1391         amdgpu_device_check_smu_prv_buffer_size(adev);
1392
1393         amdgpu_device_check_vm_size(adev);
1394
1395         amdgpu_device_check_block_size(adev);
1396
1397         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1398
1399         amdgpu_gmc_tmz_set(adev);
1400
1401         amdgpu_gmc_noretry_set(adev);
1402
1403         return 0;
1404 }
1405
1406 /**
1407  * amdgpu_switcheroo_set_state - set switcheroo state
1408  *
1409  * @pdev: pci dev pointer
1410  * @state: vga_switcheroo state
1411  *
1412  * Callback for the switcheroo driver.  Suspends or resumes
1413  * the asic before or after it is powered up using ACPI methods.
1414  */
1415 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1416                                         enum vga_switcheroo_state state)
1417 {
1418         struct drm_device *dev = pci_get_drvdata(pdev);
1419         int r;
1420
1421         if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
1422                 return;
1423
1424         if (state == VGA_SWITCHEROO_ON) {
1425                 pr_info("switched on\n");
1426                 /* don't suspend or resume card normally */
1427                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1428
1429                 pci_set_power_state(pdev, PCI_D0);
1430                 amdgpu_device_load_pci_state(pdev);
1431                 r = pci_enable_device(pdev);
1432                 if (r)
1433                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1434                 amdgpu_device_resume(dev, true);
1435
1436                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1437                 drm_kms_helper_poll_enable(dev);
1438         } else {
1439                 pr_info("switched off\n");
1440                 drm_kms_helper_poll_disable(dev);
1441                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1442                 amdgpu_device_suspend(dev, true);
1443                 amdgpu_device_cache_pci_state(pdev);
1444                 /* Shut down the device */
1445                 pci_disable_device(pdev);
1446                 pci_set_power_state(pdev, PCI_D3cold);
1447                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1448         }
1449 }
1450
1451 /**
1452  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1453  *
1454  * @pdev: pci dev pointer
1455  *
1456  * Callback for the switcheroo driver.  Check if the switcheroo
1457  * state can be changed.
1458  * Returns true if the state can be changed, false if not.
1459  */
1460 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1461 {
1462         struct drm_device *dev = pci_get_drvdata(pdev);
1463
1464         /*
1465         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1466         * locking inversion with the driver load path. And the access here is
1467         * completely racy anyway. So don't bother with locking for now.
1468         */
1469         return atomic_read(&dev->open_count) == 0;
1470 }
1471
1472 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1473         .set_gpu_state = amdgpu_switcheroo_set_state,
1474         .reprobe = NULL,
1475         .can_switch = amdgpu_switcheroo_can_switch,
1476 };
1477
1478 /**
1479  * amdgpu_device_ip_set_clockgating_state - set the CG state
1480  *
1481  * @dev: amdgpu_device pointer
1482  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1483  * @state: clockgating state (gate or ungate)
1484  *
1485  * Sets the requested clockgating state for all instances of
1486  * the hardware IP specified.
1487  * Returns the error code from the last instance.
1488  */
1489 int amdgpu_device_ip_set_clockgating_state(void *dev,
1490                                            enum amd_ip_block_type block_type,
1491                                            enum amd_clockgating_state state)
1492 {
1493         struct amdgpu_device *adev = dev;
1494         int i, r = 0;
1495
1496         for (i = 0; i < adev->num_ip_blocks; i++) {
1497                 if (!adev->ip_blocks[i].status.valid)
1498                         continue;
1499                 if (adev->ip_blocks[i].version->type != block_type)
1500                         continue;
1501                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1502                         continue;
1503                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1504                         (void *)adev, state);
1505                 if (r)
1506                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1507                                   adev->ip_blocks[i].version->funcs->name, r);
1508         }
1509         return r;
1510 }
1511
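/*
 * Illustrative sketch (not part of the driver): requesting clockgating for
 * all GFX IP instances, assuming the AMD_IP_BLOCK_TYPE_GFX and
 * AMD_CG_STATE_GATE enumerators from amd_shared.h.
 *
 *	int r = amdgpu_device_ip_set_clockgating_state(adev,
 *						       AMD_IP_BLOCK_TYPE_GFX,
 *						       AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */
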
1512 /**
1513  * amdgpu_device_ip_set_powergating_state - set the PG state
1514  *
1515  * @dev: amdgpu_device pointer
1516  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1517  * @state: powergating state (gate or ungate)
1518  *
1519  * Sets the requested powergating state for all instances of
1520  * the hardware IP specified.
1521  * Returns the error code from the last instance.
1522  */
1523 int amdgpu_device_ip_set_powergating_state(void *dev,
1524                                            enum amd_ip_block_type block_type,
1525                                            enum amd_powergating_state state)
1526 {
1527         struct amdgpu_device *adev = dev;
1528         int i, r = 0;
1529
1530         for (i = 0; i < adev->num_ip_blocks; i++) {
1531                 if (!adev->ip_blocks[i].status.valid)
1532                         continue;
1533                 if (adev->ip_blocks[i].version->type != block_type)
1534                         continue;
1535                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1536                         continue;
1537                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1538                         (void *)adev, state);
1539                 if (r)
1540                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1541                                   adev->ip_blocks[i].version->funcs->name, r);
1542         }
1543         return r;
1544 }
1545
1546 /**
1547  * amdgpu_device_ip_get_clockgating_state - get the CG state
1548  *
1549  * @adev: amdgpu_device pointer
1550  * @flags: clockgating feature flags
1551  *
1552  * Walks the list of IPs on the device and updates the clockgating
1553  * flags for each IP.
1554  * Updates @flags with the feature flags for each hardware IP where
1555  * clockgating is enabled.
1556  */
1557 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1558                                             u32 *flags)
1559 {
1560         int i;
1561
1562         for (i = 0; i < adev->num_ip_blocks; i++) {
1563                 if (!adev->ip_blocks[i].status.valid)
1564                         continue;
1565                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1566                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1567         }
1568 }
1569
1570 /**
1571  * amdgpu_device_ip_wait_for_idle - wait for idle
1572  *
1573  * @adev: amdgpu_device pointer
1574  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1575  *
1576  * Waits for the requested hardware IP to be idle.
1577  * Returns 0 for success or a negative error code on failure.
1578  */
1579 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1580                                    enum amd_ip_block_type block_type)
1581 {
1582         int i, r;
1583
1584         for (i = 0; i < adev->num_ip_blocks; i++) {
1585                 if (!adev->ip_blocks[i].status.valid)
1586                         continue;
1587                 if (adev->ip_blocks[i].version->type == block_type) {
1588                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1589                         if (r)
1590                                 return r;
1591                         break;
1592                 }
1593         }
1594         return 0;
1595
1596 }
1597
1598 /**
1599  * amdgpu_device_ip_is_idle - is the hardware IP idle
1600  *
1601  * @adev: amdgpu_device pointer
1602  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1603  *
1604  * Check if the hardware IP is idle or not.
1605  * Returns true if the IP is idle, false if not.
1606  */
1607 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1608                               enum amd_ip_block_type block_type)
1609 {
1610         int i;
1611
1612         for (i = 0; i < adev->num_ip_blocks; i++) {
1613                 if (!adev->ip_blocks[i].status.valid)
1614                         continue;
1615                 if (adev->ip_blocks[i].version->type == block_type)
1616                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1617         }
1618         return true;
1619
1620 }
1621
1622 /**
1623  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1624  *
1625  * @adev: amdgpu_device pointer
1626  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1627  *
1628  * Returns a pointer to the hardware IP block structure
1629  * if it exists for the asic, otherwise NULL.
1630  */
1631 struct amdgpu_ip_block *
1632 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1633                               enum amd_ip_block_type type)
1634 {
1635         int i;
1636
1637         for (i = 0; i < adev->num_ip_blocks; i++)
1638                 if (adev->ip_blocks[i].version->type == type)
1639                         return &adev->ip_blocks[i];
1640
1641         return NULL;
1642 }
1643
1644 /**
1645  * amdgpu_device_ip_block_version_cmp
1646  *
1647  * @adev: amdgpu_device pointer
1648  * @type: enum amd_ip_block_type
1649  * @major: major version
1650  * @minor: minor version
1651  *
1652  * Returns 0 if the IP block version is equal to or greater than the
1653  * requested version, 1 if it is smaller or the IP block doesn't exist.
1654  */
1655 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1656                                        enum amd_ip_block_type type,
1657                                        u32 major, u32 minor)
1658 {
1659         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1660
1661         if (ip_block && ((ip_block->version->major > major) ||
1662                         ((ip_block->version->major == major) &&
1663                         (ip_block->version->minor >= minor))))
1664                 return 0;
1665
1666         return 1;
1667 }
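/*
 * Illustrative usage (not part of the driver): since this returns 0 when the
 * installed version is equal or greater, a caller could require at least
 * SMC 7.0 along these lines (the version numbers are just an example):
 *
 *   if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC, 7, 0))
 *           return -EINVAL;
 */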
1668
1669 /**
1670  * amdgpu_device_ip_block_add
1671  *
1672  * @adev: amdgpu_device pointer
1673  * @ip_block_version: pointer to the IP to add
1674  *
1675  * Adds the IP block driver information to the collection of IPs
1676  * on the asic.
1677  */
1678 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1679                                const struct amdgpu_ip_block_version *ip_block_version)
1680 {
1681         if (!ip_block_version)
1682                 return -EINVAL;
1683
1684         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1685                   ip_block_version->funcs->name);
1686
1687         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1688
1689         return 0;
1690 }
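/*
 * Illustrative usage (not part of the driver): the per-ASIC set_ip_blocks
 * helpers are expected to call this once per IP, e.g. something like
 *
 *   r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *   if (r)
 *           return r;
 *
 * where vi_common_ip_block stands in for whatever block version the ASIC
 * actually provides.
 */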
1691
1692 /**
1693  * amdgpu_device_enable_virtual_display - enable virtual display feature
1694  *
1695  * @adev: amdgpu_device pointer
1696  *
1697  * Enables the virtual display feature if the user has enabled it via
1698  * the module parameter virtual_display.  This feature provides virtual
1699  * display hardware on headless boards or in virtualized environments.
1700  * This function parses and validates the configuration string specified by
1701  * the user and configures the virtual display configuration (number of
1702  * virtual connectors, crtcs, etc.) specified.
1703  */
1704 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1705 {
1706         adev->enable_virtual_display = false;
1707
1708         if (amdgpu_virtual_display) {
1709                 const char *pci_address_name = pci_name(adev->pdev);
1710                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1711
1712                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1713                 pciaddstr_tmp = pciaddstr;
1714                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1715                         pciaddname = strsep(&pciaddname_tmp, ",");
1716                         if (!strcmp("all", pciaddname)
1717                             || !strcmp(pci_address_name, pciaddname)) {
1718                                 long num_crtc;
1719                                 int res = -1;
1720
1721                                 adev->enable_virtual_display = true;
1722
1723                                 if (pciaddname_tmp)
1724                                         res = kstrtol(pciaddname_tmp, 10,
1725                                                       &num_crtc);
1726
1727                                 if (!res) {
1728                                         if (num_crtc < 1)
1729                                                 num_crtc = 1;
1730                                         if (num_crtc > 6)
1731                                                 num_crtc = 6;
1732                                         adev->mode_info.num_crtc = num_crtc;
1733                                 } else {
1734                                         adev->mode_info.num_crtc = 1;
1735                                 }
1736                                 break;
1737                         }
1738                 }
1739
1740                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1741                          amdgpu_virtual_display, pci_address_name,
1742                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1743
1744                 kfree(pciaddstr);
1745         }
1746 }
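/*
 * Illustrative parameter format (derived from the parsing above; the PCI
 * addresses are examples only): entries are separated by ';', and each entry
 * is either "all" or a PCI address, optionally followed by ",<num_crtc>"
 * (clamped to 1..6), e.g.
 *
 *   amdgpu.virtual_display=0000:03:00.0,2
 *   amdgpu.virtual_display=all,1
 */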
1747
1748 /**
1749  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1750  *
1751  * @adev: amdgpu_device pointer
1752  *
1753  * Parses the asic configuration parameters specified in the gpu info
1754  * firmware and makes them available to the driver for use in configuring
1755  * the asic.
1756  * Returns 0 on success or a negative error code on failure.
1757  */
1758 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1759 {
1760         const char *chip_name;
1761         char fw_name[40];
1762         int err;
1763         const struct gpu_info_firmware_header_v1_0 *hdr;
1764
1765         adev->firmware.gpu_info_fw = NULL;
1766
1767         if (adev->mman.discovery_bin) {
1768                 amdgpu_discovery_get_gfx_info(adev);
1769
1770                 /*
1771                  * FIXME: The bounding box is still needed by Navi12, so
1772                  * temporarily read it from gpu_info firmware. Should be dropped
1773                  * when DAL no longer needs it.
1774                  */
1775                 if (adev->asic_type != CHIP_NAVI12)
1776                         return 0;
1777         }
1778
1779         switch (adev->asic_type) {
1780 #ifdef CONFIG_DRM_AMDGPU_SI
1781         case CHIP_VERDE:
1782         case CHIP_TAHITI:
1783         case CHIP_PITCAIRN:
1784         case CHIP_OLAND:
1785         case CHIP_HAINAN:
1786 #endif
1787 #ifdef CONFIG_DRM_AMDGPU_CIK
1788         case CHIP_BONAIRE:
1789         case CHIP_HAWAII:
1790         case CHIP_KAVERI:
1791         case CHIP_KABINI:
1792         case CHIP_MULLINS:
1793 #endif
1794         case CHIP_TOPAZ:
1795         case CHIP_TONGA:
1796         case CHIP_FIJI:
1797         case CHIP_POLARIS10:
1798         case CHIP_POLARIS11:
1799         case CHIP_POLARIS12:
1800         case CHIP_VEGAM:
1801         case CHIP_CARRIZO:
1802         case CHIP_STONEY:
1803         case CHIP_VEGA20:
1804         case CHIP_SIENNA_CICHLID:
1805         case CHIP_NAVY_FLOUNDER:
1806         case CHIP_DIMGREY_CAVEFISH:
1807         default:
1808                 return 0;
1809         case CHIP_VEGA10:
1810                 chip_name = "vega10";
1811                 break;
1812         case CHIP_VEGA12:
1813                 chip_name = "vega12";
1814                 break;
1815         case CHIP_RAVEN:
1816                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1817                         chip_name = "raven2";
1818                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1819                         chip_name = "picasso";
1820                 else
1821                         chip_name = "raven";
1822                 break;
1823         case CHIP_ARCTURUS:
1824                 chip_name = "arcturus";
1825                 break;
1826         case CHIP_RENOIR:
1827                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1828                         chip_name = "renoir";
1829                 else
1830                         chip_name = "green_sardine";
1831                 break;
1832         case CHIP_NAVI10:
1833                 chip_name = "navi10";
1834                 break;
1835         case CHIP_NAVI14:
1836                 chip_name = "navi14";
1837                 break;
1838         case CHIP_NAVI12:
1839                 chip_name = "navi12";
1840                 break;
1841         case CHIP_VANGOGH:
1842                 chip_name = "vangogh";
1843                 break;
1844         }
1845
1846         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1847         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1848         if (err) {
1849                 dev_err(adev->dev,
1850                         "Failed to load gpu_info firmware \"%s\"\n",
1851                         fw_name);
1852                 goto out;
1853         }
1854         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1855         if (err) {
1856                 dev_err(adev->dev,
1857                         "Failed to validate gpu_info firmware \"%s\"\n",
1858                         fw_name);
1859                 goto out;
1860         }
1861
1862         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1863         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1864
1865         switch (hdr->version_major) {
1866         case 1:
1867         {
1868                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1869                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1870                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1871
1872                 /*
1873                  * Should be dropped when DAL no longer needs it.
1874                  */
1875                 if (adev->asic_type == CHIP_NAVI12)
1876                         goto parse_soc_bounding_box;
1877
1878                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1879                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1880                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1881                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1882                 adev->gfx.config.max_texture_channel_caches =
1883                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1884                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1885                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1886                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1887                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1888                 adev->gfx.config.double_offchip_lds_buf =
1889                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1890                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1891                 adev->gfx.cu_info.max_waves_per_simd =
1892                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1893                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1894                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1895                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1896                 if (hdr->version_minor >= 1) {
1897                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1898                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1899                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1900                         adev->gfx.config.num_sc_per_sh =
1901                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1902                         adev->gfx.config.num_packer_per_sc =
1903                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1904                 }
1905
1906 parse_soc_bounding_box:
1907                 /*
1908                  * soc bounding box info is not integrated into the discovery table,
1909                  * so we always need to parse it from the gpu info firmware if needed.
1910                  */
1911                 if (hdr->version_minor == 2) {
1912                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1913                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1914                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1915                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1916                 }
1917                 break;
1918         }
1919         default:
1920                 dev_err(adev->dev,
1921                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1922                 err = -EINVAL;
1923                 goto out;
1924         }
1925 out:
1926         return err;
1927 }
1928
1929 /**
1930  * amdgpu_device_ip_early_init - run early init for hardware IPs
1931  *
1932  * @adev: amdgpu_device pointer
1933  *
1934  * Early initialization pass for hardware IPs.  The hardware IPs that make
1935  * up each asic are discovered and each IP's early_init callback is run.  This
1936  * is the first stage in initializing the asic.
1937  * Returns 0 on success, negative error code on failure.
1938  */
1939 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1940 {
1941         int i, r;
1942
1943         amdgpu_device_enable_virtual_display(adev);
1944
1945         if (amdgpu_sriov_vf(adev)) {
1946                 r = amdgpu_virt_request_full_gpu(adev, true);
1947                 if (r)
1948                         return r;
1949         }
1950
1951         switch (adev->asic_type) {
1952 #ifdef CONFIG_DRM_AMDGPU_SI
1953         case CHIP_VERDE:
1954         case CHIP_TAHITI:
1955         case CHIP_PITCAIRN:
1956         case CHIP_OLAND:
1957         case CHIP_HAINAN:
1958                 adev->family = AMDGPU_FAMILY_SI;
1959                 r = si_set_ip_blocks(adev);
1960                 if (r)
1961                         return r;
1962                 break;
1963 #endif
1964 #ifdef CONFIG_DRM_AMDGPU_CIK
1965         case CHIP_BONAIRE:
1966         case CHIP_HAWAII:
1967         case CHIP_KAVERI:
1968         case CHIP_KABINI:
1969         case CHIP_MULLINS:
1970                 if (adev->flags & AMD_IS_APU)
1971                         adev->family = AMDGPU_FAMILY_KV;
1972                 else
1973                         adev->family = AMDGPU_FAMILY_CI;
1974
1975                 r = cik_set_ip_blocks(adev);
1976                 if (r)
1977                         return r;
1978                 break;
1979 #endif
1980         case CHIP_TOPAZ:
1981         case CHIP_TONGA:
1982         case CHIP_FIJI:
1983         case CHIP_POLARIS10:
1984         case CHIP_POLARIS11:
1985         case CHIP_POLARIS12:
1986         case CHIP_VEGAM:
1987         case CHIP_CARRIZO:
1988         case CHIP_STONEY:
1989                 if (adev->flags & AMD_IS_APU)
1990                         adev->family = AMDGPU_FAMILY_CZ;
1991                 else
1992                         adev->family = AMDGPU_FAMILY_VI;
1993
1994                 r = vi_set_ip_blocks(adev);
1995                 if (r)
1996                         return r;
1997                 break;
1998         case CHIP_VEGA10:
1999         case CHIP_VEGA12:
2000         case CHIP_VEGA20:
2001         case CHIP_RAVEN:
2002         case CHIP_ARCTURUS:
2003         case CHIP_RENOIR:
2004                 if (adev->flags & AMD_IS_APU)
2005                         adev->family = AMDGPU_FAMILY_RV;
2006                 else
2007                         adev->family = AMDGPU_FAMILY_AI;
2008
2009                 r = soc15_set_ip_blocks(adev);
2010                 if (r)
2011                         return r;
2012                 break;
2013         case  CHIP_NAVI10:
2014         case  CHIP_NAVI14:
2015         case  CHIP_NAVI12:
2016         case  CHIP_SIENNA_CICHLID:
2017         case  CHIP_NAVY_FLOUNDER:
2018         case  CHIP_DIMGREY_CAVEFISH:
2019         case CHIP_VANGOGH:
2020                 if (adev->asic_type == CHIP_VANGOGH)
2021                         adev->family = AMDGPU_FAMILY_VGH;
2022                 else
2023                         adev->family = AMDGPU_FAMILY_NV;
2024
2025                 r = nv_set_ip_blocks(adev);
2026                 if (r)
2027                         return r;
2028                 break;
2029         default:
2030                 /* FIXME: not supported yet */
2031                 return -EINVAL;
2032         }
2033
2034         amdgpu_amdkfd_device_probe(adev);
2035
2036         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2037         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2038                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2039
2040         for (i = 0; i < adev->num_ip_blocks; i++) {
2041                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2042                         DRM_ERROR("disabled ip block: %d <%s>\n",
2043                                   i, adev->ip_blocks[i].version->funcs->name);
2044                         adev->ip_blocks[i].status.valid = false;
2045                 } else {
2046                         if (adev->ip_blocks[i].version->funcs->early_init) {
2047                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2048                                 if (r == -ENOENT) {
2049                                         adev->ip_blocks[i].status.valid = false;
2050                                 } else if (r) {
2051                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2052                                                   adev->ip_blocks[i].version->funcs->name, r);
2053                                         return r;
2054                                 } else {
2055                                         adev->ip_blocks[i].status.valid = true;
2056                                 }
2057                         } else {
2058                                 adev->ip_blocks[i].status.valid = true;
2059                         }
2060                 }
2061                 /* get the vbios after the asic_funcs are set up */
2062                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2063                         r = amdgpu_device_parse_gpu_info_fw(adev);
2064                         if (r)
2065                                 return r;
2066
2067                         /* Read BIOS */
2068                         if (!amdgpu_get_bios(adev))
2069                                 return -EINVAL;
2070
2071                         r = amdgpu_atombios_init(adev);
2072                         if (r) {
2073                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2074                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2075                                 return r;
2076                         }
2077                 }
2078         }
2079
2080         adev->cg_flags &= amdgpu_cg_mask;
2081         adev->pg_flags &= amdgpu_pg_mask;
2082
2083         return 0;
2084 }
2085
2086 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2087 {
2088         int i, r;
2089
2090         for (i = 0; i < adev->num_ip_blocks; i++) {
2091                 if (!adev->ip_blocks[i].status.sw)
2092                         continue;
2093                 if (adev->ip_blocks[i].status.hw)
2094                         continue;
2095                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2096                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2097                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2098                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2099                         if (r) {
2100                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2101                                           adev->ip_blocks[i].version->funcs->name, r);
2102                                 return r;
2103                         }
2104                         adev->ip_blocks[i].status.hw = true;
2105                 }
2106         }
2107
2108         return 0;
2109 }
2110
2111 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2112 {
2113         int i, r;
2114
2115         for (i = 0; i < adev->num_ip_blocks; i++) {
2116                 if (!adev->ip_blocks[i].status.sw)
2117                         continue;
2118                 if (adev->ip_blocks[i].status.hw)
2119                         continue;
2120                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2121                 if (r) {
2122                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2123                                   adev->ip_blocks[i].version->funcs->name, r);
2124                         return r;
2125                 }
2126                 adev->ip_blocks[i].status.hw = true;
2127         }
2128
2129         return 0;
2130 }
2131
2132 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2133 {
2134         int r = 0;
2135         int i;
2136         uint32_t smu_version;
2137
2138         if (adev->asic_type >= CHIP_VEGA10) {
2139                 for (i = 0; i < adev->num_ip_blocks; i++) {
2140                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2141                                 continue;
2142
2143                         /* no need to do the fw loading again if already done */
2144                         if (adev->ip_blocks[i].status.hw)
2145                                 break;
2146
2147                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2148                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2149                                 if (r) {
2150                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2151                                                           adev->ip_blocks[i].version->funcs->name, r);
2152                                         return r;
2153                                 }
2154                         } else {
2155                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2156                                 if (r) {
2157                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2158                                                           adev->ip_blocks[i].version->funcs->name, r);
2159                                         return r;
2160                                 }
2161                         }
2162
2163                         adev->ip_blocks[i].status.hw = true;
2164                         break;
2165                 }
2166         }
2167
2168         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2169                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2170
2171         return r;
2172 }
2173
2174 /**
2175  * amdgpu_device_ip_init - run init for hardware IPs
2176  *
2177  * @adev: amdgpu_device pointer
2178  *
2179  * Main initialization pass for hardware IPs.  The list of all the hardware
2180  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2181  * are run.  sw_init initializes the software state associated with each IP
2182  * and hw_init initializes the hardware associated with each IP.
2183  * Returns 0 on success, negative error code on failure.
2184  */
2185 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2186 {
2187         int i, r;
2188
2189         r = amdgpu_ras_init(adev);
2190         if (r)
2191                 return r;
2192
2193         for (i = 0; i < adev->num_ip_blocks; i++) {
2194                 if (!adev->ip_blocks[i].status.valid)
2195                         continue;
2196                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2197                 if (r) {
2198                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2199                                   adev->ip_blocks[i].version->funcs->name, r);
2200                         goto init_failed;
2201                 }
2202                 adev->ip_blocks[i].status.sw = true;
2203
2204                 /* need to do gmc hw init early so we can allocate gpu mem */
2205                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2206                         r = amdgpu_device_vram_scratch_init(adev);
2207                         if (r) {
2208                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2209                                 goto init_failed;
2210                         }
2211                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2212                         if (r) {
2213                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2214                                 goto init_failed;
2215                         }
2216                         r = amdgpu_device_wb_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2219                                 goto init_failed;
2220                         }
2221                         adev->ip_blocks[i].status.hw = true;
2222
2223                         /* right after GMC hw init, we create CSA */
2224                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2225                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2226                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2227                                                                 AMDGPU_CSA_SIZE);
2228                                 if (r) {
2229                                         DRM_ERROR("allocate CSA failed %d\n", r);
2230                                         goto init_failed;
2231                                 }
2232                         }
2233                 }
2234         }
2235
2236         if (amdgpu_sriov_vf(adev))
2237                 amdgpu_virt_init_data_exchange(adev);
2238
2239         r = amdgpu_ib_pool_init(adev);
2240         if (r) {
2241                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2242                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2243                 goto init_failed;
2244         }
2245
2246         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2247         if (r)
2248                 goto init_failed;
2249
2250         r = amdgpu_device_ip_hw_init_phase1(adev);
2251         if (r)
2252                 goto init_failed;
2253
2254         r = amdgpu_device_fw_loading(adev);
2255         if (r)
2256                 goto init_failed;
2257
2258         r = amdgpu_device_ip_hw_init_phase2(adev);
2259         if (r)
2260                 goto init_failed;
2261
2262         /*
2263          * Retired pages will be loaded from eeprom and reserved here;
2264          * this must be called after amdgpu_device_ip_hw_init_phase2 since
2265          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2266          * functional for I2C communication, which is only true at this point.
2267          *
2268          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2269          * failures caused by a bad GPU state and stops the amdgpu init
2270          * process accordingly. For other failures, it still releases all
2271          * resources and prints an error message rather than returning a
2272          * negative value to the upper level.
2273          *
2274          * Note: theoretically, this should be called before all vram
2275          * allocations to protect retired pages from being reused.
2276          */
2277         r = amdgpu_ras_recovery_init(adev);
2278         if (r)
2279                 goto init_failed;
2280
2281         if (adev->gmc.xgmi.num_physical_nodes > 1)
2282                 amdgpu_xgmi_add_device(adev);
2283         amdgpu_amdkfd_device_init(adev);
2284
2285         amdgpu_fru_get_product_info(adev);
2286
2287 init_failed:
2288         if (amdgpu_sriov_vf(adev))
2289                 amdgpu_virt_release_full_gpu(adev, true);
2290
2291         return r;
2292 }
2293
2294 /**
2295  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2296  *
2297  * @adev: amdgpu_device pointer
2298  *
2299  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2300  * this function before a GPU reset.  If the value is retained after a
2301  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2302  */
2303 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2304 {
2305         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2306 }
2307
2308 /**
2309  * amdgpu_device_check_vram_lost - check if vram is valid
2310  *
2311  * @adev: amdgpu_device pointer
2312  *
2313  * Checks the reset magic value written to the gart pointer in VRAM.
2314  * The driver calls this after a GPU reset to see if the contents of
2315  * VRAM were lost or not.
2316  * Returns true if vram is lost, false if not.
2317  */
2318 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2319 {
2320         if (memcmp(adev->gart.ptr, adev->reset_magic,
2321                         AMDGPU_RESET_MAGIC_NUM))
2322                 return true;
2323
2324         if (!amdgpu_in_reset(adev))
2325                 return false;
2326
2327         /*
2328          * For all ASICs with baco/mode1 reset, the VRAM is
2329          * always assumed to be lost.
2330          */
2331         switch (amdgpu_asic_reset_method(adev)) {
2332         case AMD_RESET_METHOD_BACO:
2333         case AMD_RESET_METHOD_MODE1:
2334                 return true;
2335         default:
2336                 return false;
2337         }
2338 }
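/*
 * Illustrative pairing (only a sketch, not the actual reset path): the magic
 * is recorded before an ASIC reset and tested afterwards, roughly like this:
 *
 *   amdgpu_device_fill_reset_magic(adev);
 *   ... perform the ASIC reset ...
 *   vram_lost = amdgpu_device_check_vram_lost(adev);
 */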
2339
2340 /**
2341  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2342  *
2343  * @adev: amdgpu_device pointer
2344  * @state: clockgating state (gate or ungate)
2345  *
2346  * The list of all the hardware IPs that make up the asic is walked and the
2347  * set_clockgating_state callbacks are run.
2348  * During late init this enables clockgating for hardware IPs; during fini
2349  * or suspend it disables clockgating for hardware IPs.
2350  * Returns 0 on success, negative error code on failure.
2351  */
2352
2353 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2354                                                 enum amd_clockgating_state state)
2355 {
2356         int i, j, r;
2357
2358         if (amdgpu_emu_mode == 1)
2359                 return 0;
2360
2361         for (j = 0; j < adev->num_ip_blocks; j++) {
2362                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2363                 if (!adev->ip_blocks[i].status.late_initialized)
2364                         continue;
2365                 /* skip CG for VCE/UVD, it's handled specially */
2366                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2367                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2368                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2369                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2370                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2371                         /* enable clockgating to save power */
2372                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2373                                                                                      state);
2374                         if (r) {
2375                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2376                                           adev->ip_blocks[i].version->funcs->name, r);
2377                                 return r;
2378                         }
2379                 }
2380         }
2381
2382         return 0;
2383 }
2384
2385 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2386 {
2387         int i, j, r;
2388
2389         if (amdgpu_emu_mode == 1)
2390                 return 0;
2391
2392         for (j = 0; j < adev->num_ip_blocks; j++) {
2393                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2394                 if (!adev->ip_blocks[i].status.late_initialized)
2395                         continue;
2396                 /* skip PG for VCE/UVD, it's handled specially */
2397                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2398                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2399                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2400                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2401                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2402                         /* enable powergating to save power */
2403                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2404                                                                                         state);
2405                         if (r) {
2406                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2407                                           adev->ip_blocks[i].version->funcs->name, r);
2408                                 return r;
2409                         }
2410                 }
2411         }
2412         return 0;
2413 }
2414
2415 static int amdgpu_device_enable_mgpu_fan_boost(void)
2416 {
2417         struct amdgpu_gpu_instance *gpu_ins;
2418         struct amdgpu_device *adev;
2419         int i, ret = 0;
2420
2421         mutex_lock(&mgpu_info.mutex);
2422
2423         /*
2424          * MGPU fan boost feature should be enabled
2425          * only when there are two or more dGPUs in
2426          * the system
2427          */
2428         if (mgpu_info.num_dgpu < 2)
2429                 goto out;
2430
2431         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2432                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2433                 adev = gpu_ins->adev;
2434                 if (!(adev->flags & AMD_IS_APU) &&
2435                     !gpu_ins->mgpu_fan_enabled) {
2436                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2437                         if (ret)
2438                                 break;
2439
2440                         gpu_ins->mgpu_fan_enabled = 1;
2441                 }
2442         }
2443
2444 out:
2445         mutex_unlock(&mgpu_info.mutex);
2446
2447         return ret;
2448 }
2449
2450 /**
2451  * amdgpu_device_ip_late_init - run late init for hardware IPs
2452  *
2453  * @adev: amdgpu_device pointer
2454  *
2455  * Late initialization pass for hardware IPs.  The list of all the hardware
2456  * IPs that make up the asic is walked and the late_init callbacks are run.
2457  * late_init covers any special initialization that an IP requires
2458  * after all of them have been initialized or something that needs to happen
2459  * late in the init process.
2460  * Returns 0 on success, negative error code on failure.
2461  */
2462 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2463 {
2464         struct amdgpu_gpu_instance *gpu_instance;
2465         int i = 0, r;
2466
2467         for (i = 0; i < adev->num_ip_blocks; i++) {
2468                 if (!adev->ip_blocks[i].status.hw)
2469                         continue;
2470                 if (adev->ip_blocks[i].version->funcs->late_init) {
2471                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2472                         if (r) {
2473                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2474                                           adev->ip_blocks[i].version->funcs->name, r);
2475                                 return r;
2476                         }
2477                 }
2478                 adev->ip_blocks[i].status.late_initialized = true;
2479         }
2480
2481         amdgpu_ras_set_error_query_ready(adev, true);
2482
2483         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2484         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2485
2486         amdgpu_device_fill_reset_magic(adev);
2487
2488         r = amdgpu_device_enable_mgpu_fan_boost();
2489         if (r)
2490                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2491
2492
2493         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2494                 mutex_lock(&mgpu_info.mutex);
2495
2496                 /*
2497                  * Reset the device p-state to low, as it was booted at high.
2498                  *
2499                  * This should be performed only after all devices from the same
2500                  * hive get initialized.
2501                  *
2502                  * However, the number of devices in a hive is not known in
2503                  * advance; it is counted one by one as the devices are initialized.
2504                  *
2505                  * So, we wait for all XGMI interlinked devices to be initialized.
2506                  * This may bring some delays as those devices may come from
2507                  * different hives. But that should be OK.
2508                  */
2509                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2510                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2511                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2512                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2513                                         continue;
2514
2515                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2516                                                 AMDGPU_XGMI_PSTATE_MIN);
2517                                 if (r) {
2518                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2519                                         break;
2520                                 }
2521                         }
2522                 }
2523
2524                 mutex_unlock(&mgpu_info.mutex);
2525         }
2526
2527         return 0;
2528 }
2529
2530 /**
2531  * amdgpu_device_ip_fini - run fini for hardware IPs
2532  *
2533  * @adev: amdgpu_device pointer
2534  *
2535  * Main teardown pass for hardware IPs.  The list of all the hardware
2536  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2537  * are run.  hw_fini tears down the hardware associated with each IP
2538  * and sw_fini tears down any software state associated with each IP.
2539  * Returns 0 on success, negative error code on failure.
2540  */
2541 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2542 {
2543         int i, r;
2544
2545         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2546                 amdgpu_virt_release_ras_err_handler_data(adev);
2547
2548         amdgpu_ras_pre_fini(adev);
2549
2550         if (adev->gmc.xgmi.num_physical_nodes > 1)
2551                 amdgpu_xgmi_remove_device(adev);
2552
2553         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2554         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2555
2556         amdgpu_amdkfd_device_fini(adev);
2557
2558         /* need to disable SMC first */
2559         for (i = 0; i < adev->num_ip_blocks; i++) {
2560                 if (!adev->ip_blocks[i].status.hw)
2561                         continue;
2562                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2563                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2564                         /* XXX handle errors */
2565                         if (r) {
2566                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2567                                           adev->ip_blocks[i].version->funcs->name, r);
2568                         }
2569                         adev->ip_blocks[i].status.hw = false;
2570                         break;
2571                 }
2572         }
2573
2574         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2575                 if (!adev->ip_blocks[i].status.hw)
2576                         continue;
2577
2578                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2579                 /* XXX handle errors */
2580                 if (r) {
2581                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2582                                   adev->ip_blocks[i].version->funcs->name, r);
2583                 }
2584
2585                 adev->ip_blocks[i].status.hw = false;
2586         }
2587
2588
2589         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2590                 if (!adev->ip_blocks[i].status.sw)
2591                         continue;
2592
2593                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2594                         amdgpu_ucode_free_bo(adev);
2595                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2596                         amdgpu_device_wb_fini(adev);
2597                         amdgpu_device_vram_scratch_fini(adev);
2598                         amdgpu_ib_pool_fini(adev);
2599                 }
2600
2601                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2602                 /* XXX handle errors */
2603                 if (r) {
2604                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2605                                   adev->ip_blocks[i].version->funcs->name, r);
2606                 }
2607                 adev->ip_blocks[i].status.sw = false;
2608                 adev->ip_blocks[i].status.valid = false;
2609         }
2610
2611         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2612                 if (!adev->ip_blocks[i].status.late_initialized)
2613                         continue;
2614                 if (adev->ip_blocks[i].version->funcs->late_fini)
2615                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2616                 adev->ip_blocks[i].status.late_initialized = false;
2617         }
2618
2619         amdgpu_ras_fini(adev);
2620
2621         if (amdgpu_sriov_vf(adev))
2622                 if (amdgpu_virt_release_full_gpu(adev, false))
2623                         DRM_ERROR("failed to release exclusive mode on fini\n");
2624
2625         return 0;
2626 }
2627
2628 /**
2629  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2630  *
2631  * @work: work_struct.
2632  */
2633 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2634 {
2635         struct amdgpu_device *adev =
2636                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2637         int r;
2638
2639         r = amdgpu_ib_ring_tests(adev);
2640         if (r)
2641                 DRM_ERROR("ib ring test failed (%d).\n", r);
2642 }
2643
2644 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2645 {
2646         struct amdgpu_device *adev =
2647                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2648
2649         mutex_lock(&adev->gfx.gfx_off_mutex);
2650         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2651                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2652                         adev->gfx.gfx_off_state = true;
2653         }
2654         mutex_unlock(&adev->gfx.gfx_off_mutex);
2655 }
2656
2657 /**
2658  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2659  *
2660  * @adev: amdgpu_device pointer
2661  *
2662  * Main suspend function for hardware IPs.  The list of all the hardware
2663  * IPs that make up the asic is walked, clockgating is disabled and the
2664  * suspend callbacks are run.  suspend puts the hardware and software state
2665  * in each IP into a state suitable for suspend.
2666  * Returns 0 on success, negative error code on failure.
2667  */
2668 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2669 {
2670         int i, r;
2671
2672         if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
2673                 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2674                 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2675         }
2676
2677         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2678                 if (!adev->ip_blocks[i].status.valid)
2679                         continue;
2680
2681                 /* displays are handled separately */
2682                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2683                         continue;
2684
2685                 /* XXX handle errors */
2686                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2687                 /* XXX handle errors */
2688                 if (r) {
2689                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2690                                   adev->ip_blocks[i].version->funcs->name, r);
2691                         return r;
2692                 }
2693
2694                 adev->ip_blocks[i].status.hw = false;
2695         }
2696
2697         return 0;
2698 }
2699
2700 /**
2701  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2702  *
2703  * @adev: amdgpu_device pointer
2704  *
2705  * Main suspend function for hardware IPs.  The list of all the hardware
2706  * IPs that make up the asic is walked, clockgating is disabled and the
2707  * suspend callbacks are run.  suspend puts the hardware and software state
2708  * in each IP into a state suitable for suspend.
2709  * Returns 0 on success, negative error code on failure.
2710  */
2711 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2712 {
2713         int i, r;
2714
2715         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2716                 if (!adev->ip_blocks[i].status.valid)
2717                         continue;
2718                 /* displays are handled in phase1 */
2719                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2720                         continue;
2721                 /* PSP lost connection when err_event_athub occurs */
2722                 if (amdgpu_ras_intr_triggered() &&
2723                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2724                         adev->ip_blocks[i].status.hw = false;
2725                         continue;
2726                 }
2727                 /* XXX handle errors */
2728                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2729                 /* XXX handle errors */
2730                 if (r) {
2731                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2732                                   adev->ip_blocks[i].version->funcs->name, r);
2733                 }
2734                 adev->ip_blocks[i].status.hw = false;
2735                 /* handle putting the SMC in the appropriate state */
2736                 if (!amdgpu_sriov_vf(adev)) {
2737                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2738                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2739                                 if (r) {
2740                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2741                                                         adev->mp1_state, r);
2742                                         return r;
2743                                 }
2744                         }
2745                 }
2746                 adev->ip_blocks[i].status.hw = false;
2747         }
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
2757  * Main suspend function for hardware IPs.  The list of all the hardware
2758  * IPs that make up the asic is walked, clockgating is disabled and the
2759  * suspend callbacks are run.  suspend puts the hardware and software state
2760  * in each IP into a state suitable for suspend.
2761  * Returns 0 on success, negative error code on failure.
2762  */
2763 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2764 {
2765         int r;
2766
2767         if (amdgpu_sriov_vf(adev))
2768                 amdgpu_virt_request_full_gpu(adev, false);
2769
2770         r = amdgpu_device_ip_suspend_phase1(adev);
2771         if (r)
2772                 return r;
2773         r = amdgpu_device_ip_suspend_phase2(adev);
2774
2775         if (amdgpu_sriov_vf(adev))
2776                 amdgpu_virt_release_full_gpu(adev, false);
2777
2778         return r;
2779 }
2780
2781 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2782 {
2783         int i, r;
2784
2785         static enum amd_ip_block_type ip_order[] = {
2786                 AMD_IP_BLOCK_TYPE_GMC,
2787                 AMD_IP_BLOCK_TYPE_COMMON,
2788                 AMD_IP_BLOCK_TYPE_PSP,
2789                 AMD_IP_BLOCK_TYPE_IH,
2790         };
2791
2792         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2793                 int j;
2794                 struct amdgpu_ip_block *block;
2795
2796                 block = &adev->ip_blocks[i];
2797                 block->status.hw = false;
2798
2799                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2800
2801                         if (block->version->type != ip_order[j] ||
2802                                 !block->status.valid)
2803                                 continue;
2804
2805                         r = block->version->funcs->hw_init(adev);
2806                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2807                         if (r)
2808                                 return r;
2809                         block->status.hw = true;
2810                 }
2811         }
2812
2813         return 0;
2814 }
2815
2816 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2817 {
2818         int i, r;
2819
2820         static enum amd_ip_block_type ip_order[] = {
2821                 AMD_IP_BLOCK_TYPE_SMC,
2822                 AMD_IP_BLOCK_TYPE_DCE,
2823                 AMD_IP_BLOCK_TYPE_GFX,
2824                 AMD_IP_BLOCK_TYPE_SDMA,
2825                 AMD_IP_BLOCK_TYPE_UVD,
2826                 AMD_IP_BLOCK_TYPE_VCE,
2827                 AMD_IP_BLOCK_TYPE_VCN
2828         };
2829
2830         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2831                 int j;
2832                 struct amdgpu_ip_block *block;
2833
2834                 for (j = 0; j < adev->num_ip_blocks; j++) {
2835                         block = &adev->ip_blocks[j];
2836
2837                         if (block->version->type != ip_order[i] ||
2838                                 !block->status.valid ||
2839                                 block->status.hw)
2840                                 continue;
2841
2842                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2843                                 r = block->version->funcs->resume(adev);
2844                         else
2845                                 r = block->version->funcs->hw_init(adev);
2846
2847                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2848                         if (r)
2849                                 return r;
2850                         block->status.hw = true;
2851                 }
2852         }
2853
2854         return 0;
2855 }
2856
2857 /**
2858  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2859  *
2860  * @adev: amdgpu_device pointer
2861  *
2862  * First resume function for hardware IPs.  The list of all the hardware
2863  * IPs that make up the asic is walked and the resume callbacks are run for
2864  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2865  * after a suspend and updates the software state as necessary.  This
2866  * function is also used for restoring the GPU after a GPU reset.
2867  * Returns 0 on success, negative error code on failure.
2868  */
2869 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2870 {
2871         int i, r;
2872
2873         for (i = 0; i < adev->num_ip_blocks; i++) {
2874                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2875                         continue;
2876                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2877                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2878                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2879
2880                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2881                         if (r) {
2882                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2883                                           adev->ip_blocks[i].version->funcs->name, r);
2884                                 return r;
2885                         }
2886                         adev->ip_blocks[i].status.hw = true;
2887                 }
2888         }
2889
2890         return 0;
2891 }
2892
2893 /**
2894  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2895  *
2896  * @adev: amdgpu_device pointer
2897  *
2898  * Second resume function for hardware IPs.  The list of all the hardware
2899  * IPs that make up the asic is walked and the resume callbacks are run for
2900  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2901  * functional state after a suspend and updates the software state as
2902  * necessary.  This function is also used for restoring the GPU after a GPU
2903  * reset.
2904  * Returns 0 on success, negative error code on failure.
2905  */
2906 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2907 {
2908         int i, r;
2909
2910         for (i = 0; i < adev->num_ip_blocks; i++) {
2911                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2912                         continue;
2913                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2914                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2915                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2916                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2917                         continue;
2918                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2919                 if (r) {
2920                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2921                                   adev->ip_blocks[i].version->funcs->name, r);
2922                         return r;
2923                 }
2924                 adev->ip_blocks[i].status.hw = true;
2925         }
2926
2927         return 0;
2928 }
2929
2930 /**
2931  * amdgpu_device_ip_resume - run resume for hardware IPs
2932  *
2933  * @adev: amdgpu_device pointer
2934  *
2935  * Main resume function for hardware IPs.  The hardware IPs
2936  * are split into two resume functions because they are
2937  * also used in recovering from a GPU reset and some additional
2938  * steps need to be taken between them.  In this case (S3/S4) they are
2939  * run sequentially.
2940  * Returns 0 on success, negative error code on failure.
2941  */
2942 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2943 {
2944         int r;
2945
2946         r = amdgpu_device_ip_resume_phase1(adev);
2947         if (r)
2948                 return r;
2949
2950         r = amdgpu_device_fw_loading(adev);
2951         if (r)
2952                 return r;
2953
2954         r = amdgpu_device_ip_resume_phase2(adev);
2955
2956         return r;
2957 }
2958
2959 /**
2960  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2961  *
2962  * @adev: amdgpu_device pointer
2963  *
2964  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2965  */
2966 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2967 {
2968         if (amdgpu_sriov_vf(adev)) {
2969                 if (adev->is_atom_fw) {
2970                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2971                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2972                 } else {
2973                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2974                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2975                 }
2976
2977                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2978                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2979         }
2980 }
2981
2982 /**
2983  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2984  *
2985  * @asic_type: AMD asic type
2986  *
2987  * Check if there is DC (new modesetting infrastructure) support for an asic.
2988  * Returns true if DC has support, false if not.
2989  */
2990 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2991 {
2992         switch (asic_type) {
2993 #if defined(CONFIG_DRM_AMD_DC)
2994 #if defined(CONFIG_DRM_AMD_DC_SI)
2995         case CHIP_TAHITI:
2996         case CHIP_PITCAIRN:
2997         case CHIP_VERDE:
2998         case CHIP_OLAND:
2999 #endif
3000         case CHIP_BONAIRE:
3001         case CHIP_KAVERI:
3002         case CHIP_KABINI:
3003         case CHIP_MULLINS:
3004                 /*
3005                  * We have systems in the wild with these ASICs that require
3006                  * LVDS and VGA support which is not supported with DC.
3007                  *
3008                  * Fallback to the non-DC driver here by default so as not to
3009                  * cause regressions.
3010                  */
3011                 return amdgpu_dc > 0;
3012         case CHIP_HAWAII:
3013         case CHIP_CARRIZO:
3014         case CHIP_STONEY:
3015         case CHIP_POLARIS10:
3016         case CHIP_POLARIS11:
3017         case CHIP_POLARIS12:
3018         case CHIP_VEGAM:
3019         case CHIP_TONGA:
3020         case CHIP_FIJI:
3021         case CHIP_VEGA10:
3022         case CHIP_VEGA12:
3023         case CHIP_VEGA20:
3024 #if defined(CONFIG_DRM_AMD_DC_DCN)
3025         case CHIP_RAVEN:
3026         case CHIP_NAVI10:
3027         case CHIP_NAVI14:
3028         case CHIP_NAVI12:
3029         case CHIP_RENOIR:
3030         case CHIP_SIENNA_CICHLID:
3031         case CHIP_NAVY_FLOUNDER:
3032         case CHIP_DIMGREY_CAVEFISH:
3033         case CHIP_VANGOGH:
3034 #endif
3035                 return amdgpu_dc != 0;
3036 #endif
3037         default:
3038                 if (amdgpu_dc > 0)
3039                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3040                                          "but isn't supported by ASIC, ignoring\n");
3041                 return false;
3042         }
3043 }
3044
3045 /**
3046  * amdgpu_device_has_dc_support - check if dc is supported
3047  *
3048  * @adev: amdgpu_device pointer
3049  *
3050  * Returns true for supported, false for not supported
3051  */
3052 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3053 {
3054         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3055                 return false;
3056
3057         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3058 }
3059
3060
3061 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3062 {
3063         struct amdgpu_device *adev =
3064                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3065         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3066
3067         /* It's a bug to not have a hive within this function */
3068         if (WARN_ON(!hive))
3069                 return;
3070
3071         /*
3072          * Use task barrier to synchronize all xgmi reset works across the
3073          * hive. task_barrier_enter and task_barrier_exit will block
3074          * until all the threads running the xgmi reset works reach
3075          * those points. task_barrier_full will do both blocks.
3076          */
3077         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3078
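                /*
                 * For BACO the enter and exit are split across the barrier so
                 * that every node in the hive has entered BACO before any node
                 * starts to exit it.
                 */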
3079                 task_barrier_enter(&hive->tb);
3080                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3081
3082                 if (adev->asic_reset_res)
3083                         goto fail;
3084
3085                 task_barrier_exit(&hive->tb);
3086                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3087
3088                 if (adev->asic_reset_res)
3089                         goto fail;
3090
3091                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3092                         adev->mmhub.funcs->reset_ras_error_count(adev);
3093         } else {
3094
3095                 task_barrier_full(&hive->tb);
3096                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3097         }
3098
3099 fail:
3100         if (adev->asic_reset_res)
3101                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3102                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3103         amdgpu_put_xgmi_hive(hive);
3104 }
3105
3106 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3107 {
3108         char *input = amdgpu_lockup_timeout;
3109         char *timeout_setting = NULL;
3110         int index = 0;
3111         long timeout;
3112         int ret = 0;
3113
3114         /*
3115          * By default the timeout for non-compute jobs is 10000 ms and
3116          * no timeout is enforced on compute jobs on bare metal.
3117          * In SR-IOV (pp_one_vf) or passthrough mode the compute timeout
3118          * defaults to 60000 ms; other SR-IOV VFs default to 10000 ms.
3119          */
3120         adev->gfx_timeout = msecs_to_jiffies(10000);
3121         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3122         if (amdgpu_sriov_vf(adev))
3123                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3124                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3125         else if (amdgpu_passthrough(adev))
3126                 adev->compute_timeout =  msecs_to_jiffies(60000);
3127         else
3128                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
3129
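        /*
         * The lockup_timeout module parameter parsed below is a comma
         * separated list applied in order: gfx, compute, sdma, video.
         * A value of 0 keeps the default above; a negative value disables
         * the timeout entirely.
         */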
3130         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3131                 while ((timeout_setting = strsep(&input, ",")) &&
3132                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3133                         ret = kstrtol(timeout_setting, 0, &timeout);
3134                         if (ret)
3135                                 return ret;
3136
3137                         if (timeout == 0) {
3138                                 index++;
3139                                 continue;
3140                         } else if (timeout < 0) {
3141                                 timeout = MAX_SCHEDULE_TIMEOUT;
3142                         } else {
3143                                 timeout = msecs_to_jiffies(timeout);
3144                         }
3145
3146                         switch (index++) {
3147                         case 0:
3148                                 adev->gfx_timeout = timeout;
3149                                 break;
3150                         case 1:
3151                                 adev->compute_timeout = timeout;
3152                                 break;
3153                         case 2:
3154                                 adev->sdma_timeout = timeout;
3155                                 break;
3156                         case 3:
3157                                 adev->video_timeout = timeout;
3158                                 break;
3159                         default:
3160                                 break;
3161                         }
3162                 }
3163                 /*
3164                  * There is only one value specified and
3165                  * it should apply to all non-compute jobs.
3166                  */
3167                 if (index == 1) {
3168                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3169                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3170                                 adev->compute_timeout = adev->gfx_timeout;
3171                 }
3172         }
3173
3174         return ret;
3175 }
3176
3177 static const struct attribute *amdgpu_dev_attributes[] = {
3178         &dev_attr_product_name.attr,
3179         &dev_attr_product_number.attr,
3180         &dev_attr_serial_number.attr,
3181         &dev_attr_pcie_replay_count.attr,
3182         NULL
3183 };
3184
3185
3186 /**
3187  * amdgpu_device_init - initialize the driver
3188  *
3189  * @adev: amdgpu_device pointer
3190  * @flags: driver flags
3191  *
3192  * Initializes the driver info and hw (all asics).
3193  * Returns 0 for success or an error on failure.
3194  * Called at driver startup.
3195  */
3196 int amdgpu_device_init(struct amdgpu_device *adev,
3197                        uint32_t flags)
3198 {
3199         struct drm_device *ddev = adev_to_drm(adev);
3200         struct pci_dev *pdev = adev->pdev;
3201         int r, i;
3202         bool atpx = false;
3203         u32 max_MBps;
3204
3205         adev->shutdown = false;
3206         adev->flags = flags;
3207
3208         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3209                 adev->asic_type = amdgpu_force_asic_type;
3210         else
3211                 adev->asic_type = flags & AMD_ASIC_MASK;
3212
3213         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3214         if (amdgpu_emu_mode == 1)
3215                 adev->usec_timeout *= 10;
3216         adev->gmc.gart_size = 512 * 1024 * 1024;
3217         adev->accel_working = false;
3218         adev->num_rings = 0;
3219         adev->mman.buffer_funcs = NULL;
3220         adev->mman.buffer_funcs_ring = NULL;
3221         adev->vm_manager.vm_pte_funcs = NULL;
3222         adev->vm_manager.vm_pte_num_scheds = 0;
3223         adev->gmc.gmc_funcs = NULL;
3224         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3225         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3226
3227         adev->smc_rreg = &amdgpu_invalid_rreg;
3228         adev->smc_wreg = &amdgpu_invalid_wreg;
3229         adev->pcie_rreg = &amdgpu_invalid_rreg;
3230         adev->pcie_wreg = &amdgpu_invalid_wreg;
3231         adev->pciep_rreg = &amdgpu_invalid_rreg;
3232         adev->pciep_wreg = &amdgpu_invalid_wreg;
3233         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3234         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3235         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3236         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3237         adev->didt_rreg = &amdgpu_invalid_rreg;
3238         adev->didt_wreg = &amdgpu_invalid_wreg;
3239         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3240         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3241         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3242         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3243
3244         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3245                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3246                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3247
3248         /* all mutexes are initialized up front here so the
3249          * functions below can be called without locking issues */
3250         atomic_set(&adev->irq.ih.lock, 0);
3251         mutex_init(&adev->firmware.mutex);
3252         mutex_init(&adev->pm.mutex);
3253         mutex_init(&adev->gfx.gpu_clock_mutex);
3254         mutex_init(&adev->srbm_mutex);
3255         mutex_init(&adev->gfx.pipe_reserve_mutex);
3256         mutex_init(&adev->gfx.gfx_off_mutex);
3257         mutex_init(&adev->grbm_idx_mutex);
3258         mutex_init(&adev->mn_lock);
3259         mutex_init(&adev->virt.vf_errors.lock);
3260         hash_init(adev->mn_hash);
3261         atomic_set(&adev->in_gpu_reset, 0);
3262         init_rwsem(&adev->reset_sem);
3263         mutex_init(&adev->psp.mutex);
3264         mutex_init(&adev->notifier_lock);
3265
3266         r = amdgpu_device_check_arguments(adev);
3267         if (r)
3268                 return r;
3269
3270         spin_lock_init(&adev->mmio_idx_lock);
3271         spin_lock_init(&adev->smc_idx_lock);
3272         spin_lock_init(&adev->pcie_idx_lock);
3273         spin_lock_init(&adev->uvd_ctx_idx_lock);
3274         spin_lock_init(&adev->didt_idx_lock);
3275         spin_lock_init(&adev->gc_cac_idx_lock);
3276         spin_lock_init(&adev->se_cac_idx_lock);
3277         spin_lock_init(&adev->audio_endpt_idx_lock);
3278         spin_lock_init(&adev->mm_stats.lock);
3279
3280         INIT_LIST_HEAD(&adev->shadow_list);
3281         mutex_init(&adev->shadow_list_lock);
3282
3283         INIT_DELAYED_WORK(&adev->delayed_init_work,
3284                           amdgpu_device_delayed_init_work_handler);
3285         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3286                           amdgpu_device_delay_enable_gfx_off);
3287
3288         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3289
3290         adev->gfx.gfx_off_req_count = 1;
3291         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3292
3293         atomic_set(&adev->throttling_logging_enabled, 1);
3294         /*
3295          * If throttling continues, logging will be performed every minute
3296          * to avoid log flooding. "-1" is subtracted since the thermal
3297          * throttling interrupt comes every second. Thus, the total logging
3298          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3299          * for throttling interrupt) = 60 seconds.
3300          */
3301         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3302         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3303
3304         /* Registers mapping */
3305         /* TODO: block userspace mapping of io register */
3306         if (adev->asic_type >= CHIP_BONAIRE) {
3307                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3308                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3309         } else {
3310                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3311                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3312         }
3313
3314         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3315         if (adev->rmmio == NULL) {
3316                 return -ENOMEM;
3317         }
3318         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3319         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3320
3321         /* io port mapping */
3322         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3323                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3324                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3325                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3326                         break;
3327                 }
3328         }
3329         if (adev->rio_mem == NULL)
3330                 DRM_INFO("PCI I/O BAR is not found.\n");
3331
3332         /* enable PCIE atomic ops */
3333         r = pci_enable_atomic_ops_to_root(adev->pdev,
3334                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3335                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3336         if (r) {
3337                 adev->have_atomics_support = false;
3338                 DRM_INFO("PCIE atomic ops are not supported\n");
3339         } else {
3340                 adev->have_atomics_support = true;
3341         }
3342
3343         amdgpu_device_get_pcie_info(adev);
3344
3345         if (amdgpu_mcbp)
3346                 DRM_INFO("MCBP is enabled\n");
3347
3348         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3349                 adev->enable_mes = true;
3350
3351         /* detect hw virtualization here */
3352         amdgpu_detect_virtualization(adev);
3353
3354         r = amdgpu_device_get_job_timeout_settings(adev);
3355         if (r) {
3356                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3357                 goto failed_unmap;
3358         }
3359
3360         /* early init functions */
3361         r = amdgpu_device_ip_early_init(adev);
3362         if (r)
3363                 goto failed_unmap;
3364
3365         /* doorbell bar mapping and doorbell index init*/
3366         amdgpu_device_doorbell_init(adev);
3367
3368         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3369         /* this will fail for cards that aren't VGA class devices, just
3370          * ignore it */
3371         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3372                 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3373
3374         if (amdgpu_device_supports_atpx(ddev))
3375                 atpx = true;
3376         if (amdgpu_has_atpx() &&
3377             (amdgpu_is_atpx_hybrid() ||
3378              amdgpu_has_atpx_dgpu_power_cntl()) &&
3379             !pci_is_thunderbolt_attached(adev->pdev))
3380                 vga_switcheroo_register_client(adev->pdev,
3381                                                &amdgpu_switcheroo_ops, atpx);
3382         if (atpx)
3383                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3384
3385         if (amdgpu_emu_mode == 1) {
3386                 /* post the asic on emulation mode */
3387                 emu_soc_asic_init(adev);
3388                 goto fence_driver_init;
3389         }
3390
3391         /* detect if we have an SR-IOV vBIOS */
3392         amdgpu_device_detect_sriov_bios(adev);
3393
3394         /* check if we need to reset the asic
3395          *  E.g., driver was not cleanly unloaded previously, etc.
3396          */
3397         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3398                 r = amdgpu_asic_reset(adev);
3399                 if (r) {
3400                         dev_err(adev->dev, "asic reset on init failed\n");
3401                         goto failed;
3402                 }
3403         }
3404
3405         pci_enable_pcie_error_reporting(adev->pdev);
3406
3407         /* Post card if necessary */
3408         if (amdgpu_device_need_post(adev)) {
3409                 if (!adev->bios) {
3410                         dev_err(adev->dev, "no vBIOS found\n");
3411                         r = -EINVAL;
3412                         goto failed;
3413                 }
3414                 DRM_INFO("GPU posting now...\n");
3415                 r = amdgpu_device_asic_init(adev);
3416                 if (r) {
3417                         dev_err(adev->dev, "gpu post error!\n");
3418                         goto failed;
3419                 }
3420         }
3421
3422         if (adev->is_atom_fw) {
3423                 /* Initialize clocks */
3424                 r = amdgpu_atomfirmware_get_clock_info(adev);
3425                 if (r) {
3426                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3427                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3428                         goto failed;
3429                 }
3430         } else {
3431                 /* Initialize clocks */
3432                 r = amdgpu_atombios_get_clock_info(adev);
3433                 if (r) {
3434                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3435                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3436                         goto failed;
3437                 }
3438                 /* init i2c buses */
3439                 if (!amdgpu_device_has_dc_support(adev))
3440                         amdgpu_atombios_i2c_init(adev);
3441         }
3442
3443 fence_driver_init:
3444         /* Fence driver */
3445         r = amdgpu_fence_driver_init(adev);
3446         if (r) {
3447                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3448                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3449                 goto failed;
3450         }
3451
3452         /* init the mode config */
3453         drm_mode_config_init(adev_to_drm(adev));
3454
3455         r = amdgpu_device_ip_init(adev);
3456         if (r) {
3457                 /* failed in exclusive mode due to timeout */
3458                 if (amdgpu_sriov_vf(adev) &&
3459                     !amdgpu_sriov_runtime(adev) &&
3460                     amdgpu_virt_mmio_blocked(adev) &&
3461                     !amdgpu_virt_wait_reset(adev)) {
3462                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3463                         /* Don't send request since VF is inactive. */
3464                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3465                         adev->virt.ops = NULL;
3466                         r = -EAGAIN;
3467                         goto failed;
3468                 }
3469                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3470                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3471                 goto failed;
3472         }
3473
3474         dev_info(adev->dev,
3475                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3476                         adev->gfx.config.max_shader_engines,
3477                         adev->gfx.config.max_sh_per_se,
3478                         adev->gfx.config.max_cu_per_sh,
3479                         adev->gfx.cu_info.number);
3480
3481         adev->accel_working = true;
3482
3483         amdgpu_vm_check_compute_bug(adev);
3484
3485         /* Initialize the buffer migration limit. */
3486         if (amdgpu_moverate >= 0)
3487                 max_MBps = amdgpu_moverate;
3488         else
3489                 max_MBps = 8; /* Allow 8 MB/s. */
3490         /* Get a log2 for easy divisions. */
3491         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3492
3493         amdgpu_fbdev_init(adev);
3494
3495         r = amdgpu_pm_sysfs_init(adev);
3496         if (r) {
3497                 adev->pm_sysfs_en = false;
3498                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3499         } else
3500                 adev->pm_sysfs_en = true;
3501
3502         r = amdgpu_ucode_sysfs_init(adev);
3503         if (r) {
3504                 adev->ucode_sysfs_en = false;
3505                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3506         } else
3507                 adev->ucode_sysfs_en = true;
3508
3509         if ((amdgpu_testing & 1)) {
3510                 if (adev->accel_working)
3511                         amdgpu_test_moves(adev);
3512                 else
3513                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3514         }
3515         if (amdgpu_benchmarking) {
3516                 if (adev->accel_working)
3517                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3518                 else
3519                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3520         }
3521
3522         /*
3523          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3524          * Otherwise the mgpu fan boost feature will be skipped because the
3525          * gpu instance count would be too low.
3526          */
3527         amdgpu_register_gpu_instance(adev);
3528
3529         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3530          * explicit gating rather than handling it automatically.
3531          */
3532         r = amdgpu_device_ip_late_init(adev);
3533         if (r) {
3534                 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3535                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3536                 goto failed;
3537         }
3538
3539         /* must succeed. */
3540         amdgpu_ras_resume(adev);
3541
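        /* defer the remaining init work (e.g. the IB ring tests) so probe is not blocked on it */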
3542         queue_delayed_work(system_wq, &adev->delayed_init_work,
3543                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3544
3545         if (amdgpu_sriov_vf(adev))
3546                 flush_delayed_work(&adev->delayed_init_work);
3547
3548         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3549         if (r)
3550                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3551
3552         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3553                 r = amdgpu_pmu_init(adev);
3554                 if (r)
3555                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
3556
3557         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3558         if (amdgpu_device_cache_pci_state(adev->pdev))
3559                 pci_restore_state(pdev);
3560
3561         return 0;
3562
3563 failed:
3564         amdgpu_vf_error_trans_all(adev);
3565         if (atpx)
3566                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3567
3568 failed_unmap:
3569         iounmap(adev->rmmio);
3570         adev->rmmio = NULL;
3571
3572         return r;
3573 }
3574
3575 /**
3576  * amdgpu_device_fini - tear down the driver
3577  *
3578  * @adev: amdgpu_device pointer
3579  *
3580  * Tear down the driver info (all asics).
3581  * Called at driver shutdown.
3582  */
3583 void amdgpu_device_fini(struct amdgpu_device *adev)
3584 {
3585         dev_info(adev->dev, "amdgpu: finishing device.\n");
3586         flush_delayed_work(&adev->delayed_init_work);
3587         adev->shutdown = true;
3588
3589         kfree(adev->pci_state);
3590
3591         /* make sure IB tests have finished before entering exclusive mode
3592          * to avoid preemption on the IB tests
3593          */
3594         if (amdgpu_sriov_vf(adev)) {
3595                 amdgpu_virt_request_full_gpu(adev, false);
3596                 amdgpu_virt_fini_data_exchange(adev);
3597         }
3598
3599         /* disable all interrupts */
3600         amdgpu_irq_disable_all(adev);
3601         if (adev->mode_info.mode_config_initialized){
3602                 if (!amdgpu_device_has_dc_support(adev))
3603                         drm_helper_force_disable_all(adev_to_drm(adev));
3604                 else
3605                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3606         }
3607         amdgpu_fence_driver_fini(adev);
3608         if (adev->pm_sysfs_en)
3609                 amdgpu_pm_sysfs_fini(adev);
3610         amdgpu_fbdev_fini(adev);
3611         amdgpu_device_ip_fini(adev);
3612         release_firmware(adev->firmware.gpu_info_fw);
3613         adev->firmware.gpu_info_fw = NULL;
3614         adev->accel_working = false;
3615         /* free i2c buses */
3616         if (!amdgpu_device_has_dc_support(adev))
3617                 amdgpu_i2c_fini(adev);
3618
3619         if (amdgpu_emu_mode != 1)
3620                 amdgpu_atombios_fini(adev);
3621
3622         kfree(adev->bios);
3623         adev->bios = NULL;
3624         if (amdgpu_has_atpx() &&
3625             (amdgpu_is_atpx_hybrid() ||
3626              amdgpu_has_atpx_dgpu_power_cntl()) &&
3627             !pci_is_thunderbolt_attached(adev->pdev))
3628                 vga_switcheroo_unregister_client(adev->pdev);
3629         if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
3630                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3631         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3632                 vga_client_register(adev->pdev, NULL, NULL, NULL);
3633         if (adev->rio_mem)
3634                 pci_iounmap(adev->pdev, adev->rio_mem);
3635         adev->rio_mem = NULL;
3636         iounmap(adev->rmmio);
3637         adev->rmmio = NULL;
3638         amdgpu_device_doorbell_fini(adev);
3639
3640         if (adev->ucode_sysfs_en)
3641                 amdgpu_ucode_sysfs_fini(adev);
3642
3643         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3644         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3645                 amdgpu_pmu_fini(adev);
3646         if (adev->mman.discovery_bin)
3647                 amdgpu_discovery_fini(adev);
3648 }
3649
3650
3651 /*
3652  * Suspend & resume.
3653  */
3654 /**
3655  * amdgpu_device_suspend - initiate device suspend
3656  *
3657  * @dev: drm dev pointer
3658  * @fbcon: notify the fbdev of suspend
3659  *
3660  * Puts the hw in the suspend state (all asics).
3661  * Returns 0 for success or an error on failure.
3662  * Called at driver suspend.
3663  */
3664 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3665 {
3666         struct amdgpu_device *adev;
3667         struct drm_crtc *crtc;
3668         struct drm_connector *connector;
3669         struct drm_connector_list_iter iter;
3670         int r;
3671
3672         adev = drm_to_adev(dev);
3673
3674         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3675                 return 0;
3676
3677         adev->in_suspend = true;
3678         drm_kms_helper_poll_disable(dev);
3679
3680         if (fbcon)
3681                 amdgpu_fbdev_set_suspend(adev, 1);
3682
3683         cancel_delayed_work_sync(&adev->delayed_init_work);
3684
3685         if (!amdgpu_device_has_dc_support(adev)) {
3686                 /* turn off display hw */
3687                 drm_modeset_lock_all(dev);
3688                 drm_connector_list_iter_begin(dev, &iter);
3689                 drm_for_each_connector_iter(connector, &iter)
3690                         drm_helper_connector_dpms(connector,
3691                                                   DRM_MODE_DPMS_OFF);
3692                 drm_connector_list_iter_end(&iter);
3693                 drm_modeset_unlock_all(dev);
3694                         /* unpin the front buffers and cursors */
3695                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3696                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3697                         struct drm_framebuffer *fb = crtc->primary->fb;
3698                         struct amdgpu_bo *robj;
3699
3700                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3701                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3702                                 r = amdgpu_bo_reserve(aobj, true);
3703                                 if (r == 0) {
3704                                         amdgpu_bo_unpin(aobj);
3705                                         amdgpu_bo_unreserve(aobj);
3706                                 }
3707                         }
3708
3709                         if (fb == NULL || fb->obj[0] == NULL) {
3710                                 continue;
3711                         }
3712                         robj = gem_to_amdgpu_bo(fb->obj[0]);
3713                         /* don't unpin kernel fb objects */
3714                         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3715                                 r = amdgpu_bo_reserve(robj, true);
3716                                 if (r == 0) {
3717                                         amdgpu_bo_unpin(robj);
3718                                         amdgpu_bo_unreserve(robj);
3719                                 }
3720                         }
3721                 }
3722         }
3723
3724         amdgpu_ras_suspend(adev);
3725
3726         r = amdgpu_device_ip_suspend_phase1(adev);
3727
3728         amdgpu_amdkfd_suspend(adev, !fbcon);
3729
3730         /* evict vram memory */
3731         amdgpu_bo_evict_vram(adev);
3732
3733         amdgpu_fence_driver_suspend(adev);
3734
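        /*
         * On S0ix capable platforms (and outside of a GPU reset) the full
         * phase2 IP suspend is skipped and only a D3 entry state change is
         * signalled below.
         */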
3735         if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
3736                 r = amdgpu_device_ip_suspend_phase2(adev);
3737         else
3738                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
3739         /* evict remaining vram memory
3740          * This second call to evict vram is to evict the gart page table
3741          * using the CPU.
3742          */
3743         amdgpu_bo_evict_vram(adev);
3744
3745         return 0;
3746 }
3747
3748 /**
3749  * amdgpu_device_resume - initiate device resume
3750  *
3751  * @dev: drm dev pointer
3752  * @fbcon: notify the fbdev of resume
3753  *
3754  * Bring the hw back to operating state (all asics).
3755  * Returns 0 for success or an error on failure.
3756  * Called at driver resume.
3757  */
3758 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3759 {
3760         struct drm_connector *connector;
3761         struct drm_connector_list_iter iter;
3762         struct amdgpu_device *adev = drm_to_adev(dev);
3763         struct drm_crtc *crtc;
3764         int r = 0;
3765
3766         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3767                 return 0;
3768
3769         if (amdgpu_acpi_is_s0ix_supported(adev))
3770                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3771
3772         /* post card */
3773         if (amdgpu_device_need_post(adev)) {
3774                 r = amdgpu_device_asic_init(adev);
3775                 if (r)
3776                         dev_err(adev->dev, "amdgpu asic init failed\n");
3777         }
3778
3779         r = amdgpu_device_ip_resume(adev);
3780         if (r) {
3781                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3782                 return r;
3783         }
3784         amdgpu_fence_driver_resume(adev);
3785
3786
3787         r = amdgpu_device_ip_late_init(adev);
3788         if (r)
3789                 return r;
3790
3791         queue_delayed_work(system_wq, &adev->delayed_init_work,
3792                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3793
3794         if (!amdgpu_device_has_dc_support(adev)) {
3795                 /* pin cursors */
3796                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3797                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3798
3799                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3800                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3801                                 r = amdgpu_bo_reserve(aobj, true);
3802                                 if (r == 0) {
3803                                         r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3804                                         if (r != 0)
3805                                                 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3806                                         amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3807                                         amdgpu_bo_unreserve(aobj);
3808                                 }
3809                         }
3810                 }
3811         }
3812         r = amdgpu_amdkfd_resume(adev, !fbcon);
3813         if (r)
3814                 return r;
3815
3816         /* Make sure IB tests flushed */
3817         flush_delayed_work(&adev->delayed_init_work);
3818
3819         /* blat the mode back in */
3820         if (fbcon) {
3821                 if (!amdgpu_device_has_dc_support(adev)) {
3822                         /* pre DCE11 */
3823                         drm_helper_resume_force_mode(dev);
3824
3825                         /* turn on display hw */
3826                         drm_modeset_lock_all(dev);
3827
3828                         drm_connector_list_iter_begin(dev, &iter);
3829                         drm_for_each_connector_iter(connector, &iter)
3830                                 drm_helper_connector_dpms(connector,
3831                                                           DRM_MODE_DPMS_ON);
3832                         drm_connector_list_iter_end(&iter);
3833
3834                         drm_modeset_unlock_all(dev);
3835                 }
3836                 amdgpu_fbdev_set_suspend(adev, 0);
3837         }
3838
3839         drm_kms_helper_poll_enable(dev);
3840
3841         amdgpu_ras_resume(adev);
3842
3843         /*
3844          * Most of the connector probing functions try to acquire runtime pm
3845          * refs to ensure that the GPU is powered on when connector polling is
3846          * performed. Since we're calling this from a runtime PM callback,
3847          * trying to acquire rpm refs will cause us to deadlock.
3848          *
3849          * Since we're guaranteed to be holding the rpm lock, it's safe to
3850          * temporarily disable the rpm helpers so this doesn't deadlock us.
3851          */
3852 #ifdef CONFIG_PM
3853         dev->dev->power.disable_depth++;
3854 #endif
3855         if (!amdgpu_device_has_dc_support(adev))
3856                 drm_helper_hpd_irq_event(dev);
3857         else
3858                 drm_kms_helper_hotplug_event(dev);
3859 #ifdef CONFIG_PM
3860         dev->dev->power.disable_depth--;
3861 #endif
3862         adev->in_suspend = false;
3863
3864         return 0;
3865 }
3866
3867 /**
3868  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3869  *
3870  * @adev: amdgpu_device pointer
3871  *
3872  * The list of all the hardware IPs that make up the asic is walked and
3873  * the check_soft_reset callbacks are run.  check_soft_reset determines
3874  * if the asic is still hung or not.
3875  * Returns true if any of the IPs are still in a hung state, false if not.
3876  */
3877 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3878 {
3879         int i;
3880         bool asic_hang = false;
3881
3882         if (amdgpu_sriov_vf(adev))
3883                 return true;
3884
3885         if (amdgpu_asic_need_full_reset(adev))
3886                 return true;
3887
3888         for (i = 0; i < adev->num_ip_blocks; i++) {
3889                 if (!adev->ip_blocks[i].status.valid)
3890                         continue;
3891                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3892                         adev->ip_blocks[i].status.hang =
3893                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3894                 if (adev->ip_blocks[i].status.hang) {
3895                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3896                         asic_hang = true;
3897                 }
3898         }
3899         return asic_hang;
3900 }
3901
3902 /**
3903  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3904  *
3905  * @adev: amdgpu_device pointer
3906  *
3907  * The list of all the hardware IPs that make up the asic is walked and the
3908  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3909  * handles any IP specific hardware or software state changes that are
3910  * necessary for a soft reset to succeed.
3911  * Returns 0 on success, negative error code on failure.
3912  */
3913 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3914 {
3915         int i, r = 0;
3916
3917         for (i = 0; i < adev->num_ip_blocks; i++) {
3918                 if (!adev->ip_blocks[i].status.valid)
3919                         continue;
3920                 if (adev->ip_blocks[i].status.hang &&
3921                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3922                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3923                         if (r)
3924                                 return r;
3925                 }
3926         }
3927
3928         return 0;
3929 }
3930
3931 /**
3932  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3933  *
3934  * @adev: amdgpu_device pointer
3935  *
3936  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3937  * reset is necessary to recover.
3938  * Returns true if a full asic reset is required, false if not.
3939  */
3940 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3941 {
3942         int i;
3943
3944         if (amdgpu_asic_need_full_reset(adev))
3945                 return true;
3946
3947         for (i = 0; i < adev->num_ip_blocks; i++) {
3948                 if (!adev->ip_blocks[i].status.valid)
3949                         continue;
3950                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3951                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3952                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3953                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3954                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3955                         if (adev->ip_blocks[i].status.hang) {
3956                                 dev_info(adev->dev, "Some blocks need full reset!\n");
3957                                 return true;
3958                         }
3959                 }
3960         }
3961         return false;
3962 }
3963
3964 /**
3965  * amdgpu_device_ip_soft_reset - do a soft reset
3966  *
3967  * @adev: amdgpu_device pointer
3968  *
3969  * The list of all the hardware IPs that make up the asic is walked and the
3970  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3971  * IP specific hardware or software state changes that are necessary to soft
3972  * reset the IP.
3973  * Returns 0 on success, negative error code on failure.
3974  */
3975 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3976 {
3977         int i, r = 0;
3978
3979         for (i = 0; i < adev->num_ip_blocks; i++) {
3980                 if (!adev->ip_blocks[i].status.valid)
3981                         continue;
3982                 if (adev->ip_blocks[i].status.hang &&
3983                     adev->ip_blocks[i].version->funcs->soft_reset) {
3984                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3985                         if (r)
3986                                 return r;
3987                 }
3988         }
3989
3990         return 0;
3991 }
3992
3993 /**
3994  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3995  *
3996  * @adev: amdgpu_device pointer
3997  *
3998  * The list of all the hardware IPs that make up the asic is walked and the
3999  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4000  * handles any IP specific hardware or software state changes that are
4001  * necessary after the IP has been soft reset.
4002  * Returns 0 on success, negative error code on failure.
4003  */
4004 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4005 {
4006         int i, r = 0;
4007
4008         for (i = 0; i < adev->num_ip_blocks; i++) {
4009                 if (!adev->ip_blocks[i].status.valid)
4010                         continue;
4011                 if (adev->ip_blocks[i].status.hang &&
4012                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4013                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4014                 if (r)
4015                         return r;
4016         }
4017
4018         return 0;
4019 }
4020
4021 /**
4022  * amdgpu_device_recover_vram - Recover some VRAM contents
4023  *
4024  * @adev: amdgpu_device pointer
4025  *
4026  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4027  * restore things like GPUVM page tables after a GPU reset where
4028  * the contents of VRAM might be lost.
4029  *
4030  * Returns:
4031  * 0 on success, negative error code on failure.
4032  */
4033 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4034 {
4035         struct dma_fence *fence = NULL, *next = NULL;
4036         struct amdgpu_bo *shadow;
4037         long r = 1, tmo;
4038
4039         if (amdgpu_sriov_runtime(adev))
4040                 tmo = msecs_to_jiffies(8000);
4041         else
4042                 tmo = msecs_to_jiffies(100);
4043
4044         dev_info(adev->dev, "recover vram bo from shadow start\n");
4045         mutex_lock(&adev->shadow_list_lock);
4046         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4047
4048                 /* No need to recover an evicted BO */
4049                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4050                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4051                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4052                         continue;
4053
4054                 r = amdgpu_bo_restore_shadow(shadow, &next);
4055                 if (r)
4056                         break;
4057
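                /*
                 * Wait for the previously issued restore while the next copy
                 * has already been queued, so the shadow restores are
                 * pipelined one behind the other.
                 */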
4058                 if (fence) {
4059                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4060                         dma_fence_put(fence);
4061                         fence = next;
4062                         if (tmo == 0) {
4063                                 r = -ETIMEDOUT;
4064                                 break;
4065                         } else if (tmo < 0) {
4066                                 r = tmo;
4067                                 break;
4068                         }
4069                 } else {
4070                         fence = next;
4071                 }
4072         }
4073         mutex_unlock(&adev->shadow_list_lock);
4074
4075         if (fence)
4076                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4077         dma_fence_put(fence);
4078
4079         if (r < 0 || tmo <= 0) {
4080                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4081                 return -EIO;
4082         }
4083
4084         dev_info(adev->dev, "recover vram bo from shadow done\n");
4085         return 0;
4086 }
4087
4088
4089 /**
4090  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4091  *
4092  * @adev: amdgpu_device pointer
4093  * @from_hypervisor: request from hypervisor
4094  *
4095  * Do a VF FLR and reinitialize the ASIC.
4096  * Returns 0 on success, negative error code on failure.
4097  */
4098 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4099                                      bool from_hypervisor)
4100 {
4101         int r;
4102
4103         if (from_hypervisor)
4104                 r = amdgpu_virt_request_full_gpu(adev, true);
4105         else
4106                 r = amdgpu_virt_reset_gpu(adev);
4107         if (r)
4108                 return r;
4109
4110         amdgpu_amdkfd_pre_reset(adev);
4111
4112         /* Resume IP prior to SMC */
4113         r = amdgpu_device_ip_reinit_early_sriov(adev);
4114         if (r)
4115                 goto error;
4116
4117         amdgpu_virt_init_data_exchange(adev);
4118         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4119         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4120
4121         r = amdgpu_device_fw_loading(adev);
4122         if (r)
4123                 return r;
4124
4125         /* now we are okay to resume SMC/CP/SDMA */
4126         r = amdgpu_device_ip_reinit_late_sriov(adev);
4127         if (r)
4128                 goto error;
4129
4130         amdgpu_irq_gpu_reset_resume_helper(adev);
4131         r = amdgpu_ib_ring_tests(adev);
4132         amdgpu_amdkfd_post_reset(adev);
4133
4134 error:
4135         amdgpu_virt_release_full_gpu(adev, true);
4136         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4137                 amdgpu_inc_vram_lost(adev);
4138                 r = amdgpu_device_recover_vram(adev);
4139         }
4140
4141         return r;
4142 }
4143
4144 /**
4145  * amdgpu_device_has_job_running - check if there is any job in the pending list
4146  *
4147  * @adev: amdgpu_device pointer
4148  *
4149  * Check if any ring's scheduler still has a job in its pending list.
4150  */
4151 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4152 {
4153         int i;
4154         struct drm_sched_job *job;
4155
4156         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4157                 struct amdgpu_ring *ring = adev->rings[i];
4158
4159                 if (!ring || !ring->sched.thread)
4160                         continue;
4161
4162                 spin_lock(&ring->sched.job_list_lock);
4163                 job = list_first_entry_or_null(&ring->sched.pending_list,
4164                                                struct drm_sched_job, list);
4165                 spin_unlock(&ring->sched.job_list_lock);
4166                 if (job)
4167                         return true;
4168         }
4169         return false;
4170 }
4171
4172 /**
4173  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4174  *
4175  * @adev: amdgpu_device pointer
4176  *
4177  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4178  * a hung GPU.
4179  */
4180 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4181 {
4182         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4183                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4184                 return false;
4185         }
4186
4187         if (amdgpu_gpu_recovery == 0)
4188                 goto disabled;
4189
4190         if (amdgpu_sriov_vf(adev))
4191                 return true;
4192
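        /* -1 (auto): only the ASICs listed below have GPU recovery enabled by default */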
4193         if (amdgpu_gpu_recovery == -1) {
4194                 switch (adev->asic_type) {
4195                 case CHIP_BONAIRE:
4196                 case CHIP_HAWAII:
4197                 case CHIP_TOPAZ:
4198                 case CHIP_TONGA:
4199                 case CHIP_FIJI:
4200                 case CHIP_POLARIS10:
4201                 case CHIP_POLARIS11:
4202                 case CHIP_POLARIS12:
4203                 case CHIP_VEGAM:
4204                 case CHIP_VEGA20:
4205                 case CHIP_VEGA10:
4206                 case CHIP_VEGA12:
4207                 case CHIP_RAVEN:
4208                 case CHIP_ARCTURUS:
4209                 case CHIP_RENOIR:
4210                 case CHIP_NAVI10:
4211                 case CHIP_NAVI14:
4212                 case CHIP_NAVI12:
4213                 case CHIP_SIENNA_CICHLID:
4214                 case CHIP_NAVY_FLOUNDER:
4215                 case CHIP_VANGOGH:
4216                         break;
4217                 default:
4218                         goto disabled;
4219                 }
4220         }
4221
4222         return true;
4223
4224 disabled:
4225         dev_info(adev->dev, "GPU recovery disabled.\n");
4226         return false;
4227 }
4228
4229
4230 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4231                                         struct amdgpu_job *job,
4232                                         bool *need_full_reset_arg)
4233 {
4234         int i, r = 0;
4235         bool need_full_reset  = *need_full_reset_arg;
4236
4237         amdgpu_debugfs_wait_dump(adev);
4238
4239         if (amdgpu_sriov_vf(adev)) {
4240                 /* stop the data exchange thread */
4241                 amdgpu_virt_fini_data_exchange(adev);
4242         }
4243
4244         /* block all schedulers and reset given job's ring */
4245         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4246                 struct amdgpu_ring *ring = adev->rings[i];
4247
4248                 if (!ring || !ring->sched.thread)
4249                         continue;
4250
4251                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4252                 amdgpu_fence_driver_force_completion(ring);
4253         }
4254
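        /*
         * Bump the karma of the job that triggered the reset; once it exceeds
         * the scheduler's hang limit its context is marked guilty and its
         * jobs are not resubmitted.
         */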
4255         if (job)
4256                 drm_sched_increase_karma(&job->base);
4257
4258         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4259         if (!amdgpu_sriov_vf(adev)) {
4260
4261                 if (!need_full_reset)
4262                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4263
4264                 if (!need_full_reset) {
4265                         amdgpu_device_ip_pre_soft_reset(adev);
4266                         r = amdgpu_device_ip_soft_reset(adev);
4267                         amdgpu_device_ip_post_soft_reset(adev);
4268                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4269                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4270                                 need_full_reset = true;
4271                         }
4272                 }
4273
4274                 if (need_full_reset)
4275                         r = amdgpu_device_ip_suspend(adev);
4276
4277                 *need_full_reset_arg = need_full_reset;
4278         }
4279
4280         return r;
4281 }
4282
4283 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4284                                struct list_head *device_list_handle,
4285                                bool *need_full_reset_arg,
4286                                bool skip_hw_reset)
4287 {
4288         struct amdgpu_device *tmp_adev = NULL;
4289         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4290         int r = 0;
4291
4292         /*
4293          * ASIC reset has to be done on all XGMI hive nodes ASAP
4294          * to allow proper links negotiation in FW (within 1 sec)
4295          */
4296         if (!skip_hw_reset && need_full_reset) {
4297                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4298                         /* For XGMI run all resets in parallel to speed up the process */
4299                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4300                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4301                                         r = -EALREADY;
4302                         } else
4303                                 r = amdgpu_asic_reset(tmp_adev);
4304
4305                         if (r) {
4306                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4307                                          r, adev_to_drm(tmp_adev)->unique);
4308                                 break;
4309                         }
4310                 }
4311
4312                 /* For XGMI wait for all resets to complete before proceed */
4313                 if (!r) {
4314                         list_for_each_entry(tmp_adev, device_list_handle,
4315                                             gmc.xgmi.head) {
4316                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4317                                         flush_work(&tmp_adev->xgmi_reset_work);
4318                                         r = tmp_adev->asic_reset_res;
4319                                         if (r)
4320                                                 break;
4321                                 }
4322                         }
4323                 }
4324         }
4325
4326         if (!r && amdgpu_ras_intr_triggered()) {
4327                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4328                         if (tmp_adev->mmhub.funcs &&
4329                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4330                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4331                 }
4332
4333                 amdgpu_ras_intr_cleared();
4334         }
4335
4336         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4337                 if (need_full_reset) {
4338                         /* post card */
4339                         if (amdgpu_device_asic_init(tmp_adev))
4340                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4341
4342                         if (!r) {
4343                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4344                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4345                                 if (r)
4346                                         goto out;
4347
4348                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4349                                 if (vram_lost) {
4350                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4351                                         amdgpu_inc_vram_lost(tmp_adev);
4352                                 }
4353
4354                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4355                                 if (r)
4356                                         goto out;
4357
4358                                 r = amdgpu_device_fw_loading(tmp_adev);
4359                                 if (r)
4360                                         return r;
4361
4362                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4363                                 if (r)
4364                                         goto out;
4365
4366                                 if (vram_lost)
4367                                         amdgpu_device_fill_reset_magic(tmp_adev);
4368
4369                                 /*
4370                                  * Add this ASIC back as tracked since the reset
4371                                  * has completed successfully.
4372                                  */
4373                                 amdgpu_register_gpu_instance(tmp_adev);
4374
4375                                 r = amdgpu_device_ip_late_init(tmp_adev);
4376                                 if (r)
4377                                         goto out;
4378
4379                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4380
4381                                 /*
4382                                  * The GPU enters a bad state once the number of
4383                                  * faulty pages detected by ECC reaches the
4384                                  * threshold, and RAS recovery is scheduled next.
4385                                  * So add a check here to break out of recovery
4386                                  * if the bad page threshold has indeed been
4387                                  * exceeded, and remind the user to either retire
4388                                  * this GPU or set a larger bad_page_threshold
4389                                  * value when probing the driver again.
4390                                  */
4391                                 if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
4392                                         /* must succeed. */
4393                                         amdgpu_ras_resume(tmp_adev);
4394                                 } else {
4395                                         r = -EINVAL;
4396                                         goto out;
4397                                 }
4398
4399                                 /* Update PSP FW topology after reset */
4400                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4401                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4402                         }
4403                 }
4404
4405 out:
4406                 if (!r) {
4407                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4408                         r = amdgpu_ib_ring_tests(tmp_adev);
4409                         if (r) {
4410                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4411                                 r = amdgpu_device_ip_suspend(tmp_adev);
4412                                 need_full_reset = true;
4413                                 r = -EAGAIN;
4414                                 goto end;
4415                         }
4416                 }
4417
4418                 if (!r)
4419                         r = amdgpu_device_recover_vram(tmp_adev);
4420                 else
4421                         tmp_adev->asic_reset_res = r;
4422         }
4423
4424 end:
4425         *need_full_reset_arg = need_full_reset;
4426         return r;
4427 }
4428
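/*
 * amdgpu_device_lock_adev - take exclusive ownership of a device for reset
 *
 * Atomically marks the device as being in GPU reset, takes its reset
 * semaphore (nested against the hive lock when part of an XGMI hive),
 * bumps the reset counter and sets the MP1 state expected by the chosen
 * reset method. Returns false if another reset already owns the device.
 */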
4429 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4430                                 struct amdgpu_hive_info *hive)
4431 {
4432         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4433                 return false;
4434
4435         if (hive) {
4436                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4437         } else {
4438                 down_write(&adev->reset_sem);
4439         }
4440
4441         atomic_inc(&adev->gpu_reset_counter);
4442         switch (amdgpu_asic_reset_method(adev)) {
4443         case AMD_RESET_METHOD_MODE1:
4444                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4445                 break;
4446         case AMD_RESET_METHOD_MODE2:
4447                 adev->mp1_state = PP_MP1_STATE_RESET;
4448                 break;
4449         default:
4450                 adev->mp1_state = PP_MP1_STATE_NONE;
4451                 break;
4452         }
4453
4454         return true;
4455 }
4456
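/* Release the reset ownership taken by amdgpu_device_lock_adev(). */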
4457 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4458 {
4459         amdgpu_vf_error_trans_all(adev);
4460         adev->mp1_state = PP_MP1_STATE_NONE;
4461         atomic_set(&adev->in_gpu_reset, 0);
4462         up_write(&adev->reset_sem);
4463 }
4464
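/*
 * Re-enable runtime PM of the display audio device (PCI function 1 of the
 * GPU), which was disabled by amdgpu_device_suspend_display_audio(), and
 * resume it.
 */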
4465 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4466 {
4467         struct pci_dev *p = NULL;
4468
4469         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4470                         adev->pdev->bus->number, 1);
4471         if (p) {
4472                 pm_runtime_enable(&(p->dev));
4473                 pm_runtime_resume(&(p->dev));
4474         }
4475 }
4476
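/*
 * Force the display audio device (PCI function 1 of the GPU) into runtime
 * suspend before a BACO or mode1 reset: repeatedly try to runtime-suspend
 * it until that succeeds or the autosuspend deadline (4 seconds if unknown)
 * passes, then disable runtime PM so it stays suspended across the reset.
 */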
4477 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4478 {
4479         enum amd_reset_method reset_method;
4480         struct pci_dev *p = NULL;
4481         u64 expires;
4482
4483         /*
4484          * For now, only BACO and mode1 reset are confirmed
4485          * to suffer the audio issue when not properly suspended.
4486          */
4487         reset_method = amdgpu_asic_reset_method(adev);
4488         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4489              (reset_method != AMD_RESET_METHOD_MODE1))
4490                 return -EINVAL;
4491
4492         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4493                         adev->pdev->bus->number, 1);
4494         if (!p)
4495                 return -ENODEV;
4496
4497         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4498         if (!expires)
4499                 /*
4500                  * If we cannot get the audio device autosuspend delay,
4501                  * use a fixed 4s interval. Since 3s is the audio
4502                  * controller's default autosuspend delay, the 4s
4503                  * used here is guaranteed to cover it.
4504                  */
4505                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4506
4507         while (!pm_runtime_status_suspended(&(p->dev))) {
4508                 if (!pm_runtime_suspend(&(p->dev)))
4509                         break;
4510
4511                 if (expires < ktime_get_mono_fast_ns()) {
4512                         dev_warn(adev->dev, "failed to suspend display audio\n");
4513                         /* TODO: abort the succeeding gpu reset? */
4514                         return -ETIMEDOUT;
4515                 }
4516         }
4517
4518         pm_runtime_disable(&(p->dev));
4519
4520         return 0;
4521 }
4522
4523 /**
4524  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4525  *
4526  * @adev: amdgpu_device pointer
4527  * @job: which job triggered the hang
4528  *
4529  * Attempt to reset the GPU if it has hung (all asics).
4530  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
4531  * Returns 0 for success or an error on failure.
4532  */
4533
4534 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4535                               struct amdgpu_job *job)
4536 {
4537         struct list_head device_list, *device_list_handle =  NULL;
4538         bool need_full_reset = false;
4539         bool job_signaled = false;
4540         struct amdgpu_hive_info *hive = NULL;
4541         struct amdgpu_device *tmp_adev = NULL;
4542         int i, r = 0;
4543         bool need_emergency_restart = false;
4544         bool audio_suspended = false;
4545
4546         /*
4547          * Special case: RAS triggered and full reset isn't supported
4548          */
4549         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4550
4551         /*
4552          * Flush RAM to disk so that after reboot
4553          * the user can read the log and see why the system rebooted.
4554          */
4555         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4556                 DRM_WARN("Emergency reboot.");
4557
4558                 ksys_sync_helper();
4559                 emergency_restart();
4560         }
4561
4562         dev_info(adev->dev, "GPU %s begin!\n",
4563                 need_emergency_restart ? "jobs stop":"reset");
4564
4565         /*
4566          * Here we trylock to avoid a chain of resets executing from
4567          * either jobs triggered on different adevs in an XGMI hive or jobs on
4568          * different schedulers for the same device while this TO handler is running.
4569          * We always reset all schedulers for a device and all devices in an XGMI
4570          * hive, so that should take care of them too.
4571          */
4572         hive = amdgpu_get_xgmi_hive(adev);
4573         if (hive) {
4574                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4575                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4576                                 job ? job->base.id : -1, hive->hive_id);
4577                         amdgpu_put_xgmi_hive(hive);
4578                         return 0;
4579                 }
4580                 mutex_lock(&hive->hive_lock);
4581         }
4582
4583         /*
4584          * Build list of devices to reset.
4585          * In case we are in XGMI hive mode, re-sort the device list
4586          * to put adev in the first position.
4587          */
4588         INIT_LIST_HEAD(&device_list);
4589         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4590                 if (!hive)
4591                         return -ENODEV;
4592                 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4593                         list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4594                 device_list_handle = &hive->device_list;
4595         } else {
4596                 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4597                 device_list_handle = &device_list;
4598         }
4599
4600         /* block all schedulers and reset given job's ring */
4601         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4602                 if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
4603                         dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4604                                   job ? job->base.id : -1);
4605                         r = 0;
4606                         goto skip_recovery;
4607                 }
4608
4609                 /*
4610                  * Try to put the audio codec into a suspended state
4611                  * before the gpu reset starts.
4612                  *
4613                  * Since the power domain of the graphics device is
4614                  * shared with the AZ power domain, without this we
4615                  * may change the audio hardware behind the audio
4616                  * driver's back, which will trigger audio codec
4617                  * errors.
4618                  */
4619                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4620                         audio_suspended = true;
4621
4622                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4623
4624                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4625
4626                 if (!amdgpu_sriov_vf(tmp_adev))
4627                         amdgpu_amdkfd_pre_reset(tmp_adev);
4628
4629                 /*
4630                  * Mark the ASICs to be reset as untracked first,
4631                  * and add them back after the reset has completed.
4632                  */
4633                 amdgpu_unregister_gpu_instance(tmp_adev);
4634
4635                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4636
4637                 /* disable ras on ALL IPs */
4638                 if (!need_emergency_restart &&
4639                       amdgpu_device_ip_need_full_reset(tmp_adev))
4640                         amdgpu_ras_suspend(tmp_adev);
4641
4642                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4643                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4644
4645                         if (!ring || !ring->sched.thread)
4646                                 continue;
4647
4648                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4649
4650                         if (need_emergency_restart)
4651                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4652                 }
4653         }
4654
4655         if (need_emergency_restart)
4656                 goto skip_sched_resume;
4657
4658         /*
4659          * Must check guilty signal here since after this point all old
4660          * HW fences are force signaled.
4661          *
4662          * job->base holds a reference to parent fence
4663          */
4664         if (job && job->base.s_fence->parent &&
4665             dma_fence_is_signaled(job->base.s_fence->parent)) {
4666                 job_signaled = true;
4667                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4668                 goto skip_hw_reset;
4669         }
4670
4671 retry:  /* Pre-ASIC reset for the rest of the adevs in the XGMI hive. */
4672         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4673                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4674                                                  (tmp_adev == adev) ? job : NULL,
4675                                                  &need_full_reset);
4676                 /* TODO: Should we stop? */
4677                 if (r) {
4678                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
4679                                   r, adev_to_drm(tmp_adev)->unique);
4680                         tmp_adev->asic_reset_res = r;
4681                 }
4682         }
4683
4684         /* Actual ASIC resets if needed. */
4685         /* TODO Implement XGMI hive reset logic for SRIOV */
4686         if (amdgpu_sriov_vf(adev)) {
4687                 r = amdgpu_device_reset_sriov(adev, !job);
4688                 if (r)
4689                         adev->asic_reset_res = r;
4690         } else {
4691                 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
4692                 if (r == -EAGAIN)
4693                         goto retry;
4694         }
4695
4696 skip_hw_reset:
4697
4698         /* Post ASIC reset for all devs. */
4699         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4700
4701                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4702                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4703
4704                         if (!ring || !ring->sched.thread)
4705                                 continue;
4706
4707                         /* No point in resubmitting jobs if we didn't do a HW reset */
4708                         if (!tmp_adev->asic_reset_res && !job_signaled)
4709                                 drm_sched_resubmit_jobs(&ring->sched);
4710
4711                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4712                 }
4713
4714                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4715                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4716                 }
4717
4718                 tmp_adev->asic_reset_res = 0;
4719
4720                 if (r) {
4721                         /* bad news, how do we tell this to userspace? */
4722                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4723                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4724                 } else {
4725                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4726                 }
4727         }
4728
4729 skip_sched_resume:
4730         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4731                 /* unlock kfd: SRIOV would do it separately */
4732                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4733                         amdgpu_amdkfd_post_reset(tmp_adev);
4734                 if (audio_suspended)
4735                         amdgpu_device_resume_display_audio(tmp_adev);
4736                 amdgpu_device_unlock_adev(tmp_adev);
4737         }
4738
4739 skip_recovery:
4740         if (hive) {
4741                 atomic_set(&hive->in_reset, 0);
4742                 mutex_unlock(&hive->hive_lock);
4743                 amdgpu_put_xgmi_hive(hive);
4744         }
4745
4746         if (r)
4747                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4748         return r;
4749 }
4750
4751 /**
4752  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4753  *
4754  * @adev: amdgpu_device pointer
4755  *
4756  * Fetches and stores in the driver the PCIE capabilities (gen speed
4757  * and lanes) of the slot the device is in. Handles APUs and
4758  * virtualized environments where PCIE config space may not be available.
4759  */
4760 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4761 {
4762         struct pci_dev *pdev;
4763         enum pci_bus_speed speed_cap, platform_speed_cap;
4764         enum pcie_link_width platform_link_width;
4765
4766         if (amdgpu_pcie_gen_cap)
4767                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4768
4769         if (amdgpu_pcie_lane_cap)
4770                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4771
4772         /* covers APUs as well */
4773         if (pci_is_root_bus(adev->pdev->bus)) {
4774                 if (adev->pm.pcie_gen_mask == 0)
4775                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4776                 if (adev->pm.pcie_mlw_mask == 0)
4777                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4778                 return;
4779         }
4780
4781         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4782                 return;
4783
4784         pcie_bandwidth_available(adev->pdev, NULL,
4785                                  &platform_speed_cap, &platform_link_width);
4786
4787         if (adev->pm.pcie_gen_mask == 0) {
4788                 /* asic caps */
4789                 pdev = adev->pdev;
4790                 speed_cap = pcie_get_speed_cap(pdev);
4791                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4792                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4793                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4794                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4795                 } else {
4796                         if (speed_cap == PCIE_SPEED_16_0GT)
4797                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4798                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4799                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4800                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4801                         else if (speed_cap == PCIE_SPEED_8_0GT)
4802                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4803                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4804                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4805                         else if (speed_cap == PCIE_SPEED_5_0GT)
4806                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4807                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4808                         else
4809                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4810                 }
4811                 /* platform caps */
4812                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4813                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4814                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4815                 } else {
4816                         if (platform_speed_cap == PCIE_SPEED_16_0GT)
4817                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4818                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4819                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4820                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4821                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4822                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4823                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4824                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4825                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4826                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4827                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4828                         else
4829                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4830
4831                 }
4832         }
4833         if (adev->pm.pcie_mlw_mask == 0) {
4834                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4835                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4836                 } else {
4837                         switch (platform_link_width) {
4838                         case PCIE_LNK_X32:
4839                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4840                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4841                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4842                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4843                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4844                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4845                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4846                                 break;
4847                         case PCIE_LNK_X16:
4848                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4849                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4850                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4851                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4852                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4853                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4854                                 break;
4855                         case PCIE_LNK_X12:
4856                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4857                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4858                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4859                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4860                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4861                                 break;
4862                         case PCIE_LNK_X8:
4863                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4864                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4865                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4866                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4867                                 break;
4868                         case PCIE_LNK_X4:
4869                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4870                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4871                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4872                                 break;
4873                         case PCIE_LNK_X2:
4874                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4875                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4876                                 break;
4877                         case PCIE_LNK_X1:
4878                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4879                                 break;
4880                         default:
4881                                 break;
4882                         }
4883                 }
4884         }
4885 }
4886
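/**
 * amdgpu_device_baco_enter - enter the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Disables the doorbell interrupt when RAS is supported and asks the DPM
 * code to put the ASIC into BACO. Returns 0 on success, -ENOTSUPP if the
 * device does not support BACO, or a negative error code on failure.
 */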
4887 int amdgpu_device_baco_enter(struct drm_device *dev)
4888 {
4889         struct amdgpu_device *adev = drm_to_adev(dev);
4890         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4891
4892         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4893                 return -ENOTSUPP;
4894
4895         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4896                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4897
4898         return amdgpu_dpm_baco_enter(adev);
4899 }
4900
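/**
 * amdgpu_device_baco_exit - leave the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Asks the DPM code to bring the ASIC out of BACO and re-enables the
 * doorbell interrupt when RAS is supported. Returns 0 on success,
 * -ENOTSUPP if the device does not support BACO, or a negative error
 * code on failure.
 */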
4901 int amdgpu_device_baco_exit(struct drm_device *dev)
4902 {
4903         struct amdgpu_device *adev = drm_to_adev(dev);
4904         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4905         int ret = 0;
4906
4907         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4908                 return -ENOTSUPP;
4909
4910         ret = amdgpu_dpm_baco_exit(adev);
4911         if (ret)
4912                 return ret;
4913
4914         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4915                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4916
4917         return 0;
4918 }
4919
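/*
 * Cancel the pending timeout (TDR) work of every ring's scheduler and
 * wait for it to finish.
 */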
4920 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
4921 {
4922         int i;
4923
4924         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4925                 struct amdgpu_ring *ring = adev->rings[i];
4926
4927                 if (!ring || !ring->sched.thread)
4928                         continue;
4929
4930                 cancel_delayed_work_sync(&ring->sched.work_tdr);
4931         }
4932 }
4933
4934 /**
4935  * amdgpu_pci_error_detected - Called when a PCI error is detected.
4936  * @pdev: PCI device struct
4937  * @state: PCI channel state
4938  *
4939  * Description: Called when a PCI error is detected.
4940  *
4941  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
4942  */
4943 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4944 {
4945         struct drm_device *dev = pci_get_drvdata(pdev);
4946         struct amdgpu_device *adev = drm_to_adev(dev);
4947         int i;
4948
4949         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
4950
4951         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4952                 DRM_WARN("No support for XGMI hive yet...");
4953                 return PCI_ERS_RESULT_DISCONNECT;
4954         }
4955
4956         switch (state) {
4957         case pci_channel_io_normal:
4958                 return PCI_ERS_RESULT_CAN_RECOVER;
4959         /* Fatal error, prepare for slot reset */
4960         case pci_channel_io_frozen:
4961                 /*
4962                  * Cancel and wait for all TDRs in progress if failing to
4963                  * set adev->in_gpu_reset in amdgpu_device_lock_adev
4964                  *
4965                  * Locking adev->reset_sem will prevent any external access
4966                  * to GPU during PCI error recovery
4967                  */
4968                 while (!amdgpu_device_lock_adev(adev, NULL))
4969                         amdgpu_cancel_all_tdr(adev);
4970
4971                 /*
4972                  * Block any work scheduling as we do for regular GPU reset
4973                  * for the duration of the recovery
4974                  */
4975                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4976                         struct amdgpu_ring *ring = adev->rings[i];
4977
4978                         if (!ring || !ring->sched.thread)
4979                                 continue;
4980
4981                         drm_sched_stop(&ring->sched, NULL);
4982                 }
4983                 return PCI_ERS_RESULT_NEED_RESET;
4984         case pci_channel_io_perm_failure:
4985                 /* Permanent error, prepare for device removal */
4986                 return PCI_ERS_RESULT_DISCONNECT;
4987         }
4988
4989         return PCI_ERS_RESULT_NEED_RESET;
4990 }
4991
4992 /**
4993  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
4994  * @pdev: pointer to PCI device
4995  */
4996 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
4997 {
4998
4999         DRM_INFO("PCI error: mmio enabled callback!!\n");
5000
5001         /* TODO - dump whatever for debugging purposes */
5002
5003         /* This is called only if amdgpu_pci_error_detected returns
5004          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5005          * work, so there is no need to reset the slot.
5006          */
5007
5008         return PCI_ERS_RESULT_RECOVERED;
5009 }
5010
5011 /**
5012  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5013  * @pdev: PCI device struct
5014  *
5015  * Description: This routine is called by the pci error recovery
5016  * code after the PCI slot has been reset, just before we
5017  * should resume normal operations.
5018  */
5019 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5020 {
5021         struct drm_device *dev = pci_get_drvdata(pdev);
5022         struct amdgpu_device *adev = drm_to_adev(dev);
5023         int r, i;
5024         bool need_full_reset = true;
5025         u32 memsize;
5026         struct list_head device_list;
5027
5028         DRM_INFO("PCI error: slot reset callback!!\n");
5029
5030         INIT_LIST_HEAD(&device_list);
5031         list_add_tail(&adev->gmc.xgmi.head, &device_list);
5032
5033         /* wait for asic to come out of reset */
5034         msleep(500);
5035
5036         /* Restore the PCI config space */
5037         amdgpu_device_load_pci_state(pdev);
5038
5039         /* confirm ASIC came out of reset */
5040         for (i = 0; i < adev->usec_timeout; i++) {
5041                 memsize = amdgpu_asic_get_config_memsize(adev);
5042
5043                 if (memsize != 0xffffffff)
5044                         break;
5045                 udelay(1);
5046         }
5047         if (memsize == 0xffffffff) {
5048                 r = -ETIME;
5049                 goto out;
5050         }
5051
5052         adev->in_pci_err_recovery = true;
5053         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
5054         adev->in_pci_err_recovery = false;
5055         if (r)
5056                 goto out;
5057
5058         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
5059
5060 out:
5061         if (!r) {
5062                 if (amdgpu_device_cache_pci_state(adev->pdev))
5063                         pci_restore_state(adev->pdev);
5064
5065                 DRM_INFO("PCIe error recovery succeeded\n");
5066         } else {
5067                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5068                 amdgpu_device_unlock_adev(adev);
5069         }
5070
5071         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5072 }
5073
5074 /**
5075  * amdgpu_pci_resume() - resume normal ops after PCI reset
5076  * @pdev: pointer to PCI device
5077  *
5078  * Called when the error recovery driver tells us that it's
5079  * OK to resume normal operation.
5080  */
5081 void amdgpu_pci_resume(struct pci_dev *pdev)
5082 {
5083         struct drm_device *dev = pci_get_drvdata(pdev);
5084         struct amdgpu_device *adev = drm_to_adev(dev);
5085         int i;
5086
5087
5088         DRM_INFO("PCI error: resume callback!!\n");
5089
5090         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5091                 struct amdgpu_ring *ring = adev->rings[i];
5092
5093                 if (!ring || !ring->sched.thread)
5094                         continue;
5095
5096
5097                 drm_sched_resubmit_jobs(&ring->sched);
5098                 drm_sched_start(&ring->sched, true);
5099         }
5100
5101         amdgpu_device_unlock_adev(adev);
5102 }
5103
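/**
 * amdgpu_device_cache_pci_state - save the current PCI config space
 *
 * @pdev: PCI device struct
 *
 * Saves the PCI configuration space of the device and stores a copy in
 * adev->pci_state so it can be restored later (see
 * amdgpu_device_load_pci_state()). Returns true on success, false otherwise.
 */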
5104 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5105 {
5106         struct drm_device *dev = pci_get_drvdata(pdev);
5107         struct amdgpu_device *adev = drm_to_adev(dev);
5108         int r;
5109
5110         r = pci_save_state(pdev);
5111         if (!r) {
5112                 kfree(adev->pci_state);
5113
5114                 adev->pci_state = pci_store_saved_state(pdev);
5115
5116                 if (!adev->pci_state) {
5117                         DRM_ERROR("Failed to store PCI saved state");
5118                         return false;
5119                 }
5120         } else {
5121                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5122                 return false;
5123         }
5124
5125         return true;
5126 }
5127
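/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Restores the PCI configuration space previously stored by
 * amdgpu_device_cache_pci_state(). Returns true on success, false if no
 * state was cached or the restore failed.
 */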
5128 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5129 {
5130         struct drm_device *dev = pci_get_drvdata(pdev);
5131         struct amdgpu_device *adev = drm_to_adev(dev);
5132         int r;
5133
5134         if (!adev->pci_state)
5135                 return false;
5136
5137         r = pci_load_saved_state(pdev, adev->pci_state);
5138
5139         if (!r) {
5140                 pci_restore_state(pdev);
5141         } else {
5142                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5143                 return false;
5144         }
5145
5146         return true;
5147 }
5148
5149