drm/amd/amdgpu: move inc gpu_reset_counter after drm_sched_stop
linux-2.6-microblaze.git: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68
69 #include <linux/suspend.h>
70 #include <drm/task_barrier.h>
71 #include <linux/pm_runtime.h>
72
73 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
84
85 #define AMDGPU_RESUME_MS                2000
86
87 const char *amdgpu_asic_name[] = {
88         "TAHITI",
89         "PITCAIRN",
90         "VERDE",
91         "OLAND",
92         "HAINAN",
93         "BONAIRE",
94         "KAVERI",
95         "KABINI",
96         "HAWAII",
97         "MULLINS",
98         "TOPAZ",
99         "TONGA",
100         "FIJI",
101         "CARRIZO",
102         "STONEY",
103         "POLARIS10",
104         "POLARIS11",
105         "POLARIS12",
106         "VEGAM",
107         "VEGA10",
108         "VEGA12",
109         "VEGA20",
110         "RAVEN",
111         "ARCTURUS",
112         "RENOIR",
113         "NAVI10",
114         "NAVI14",
115         "NAVI12",
116         "SIENNA_CICHLID",
117         "NAVY_FLOUNDER",
118         "VANGOGH",
119         "DIMGREY_CAVEFISH",
120         "LAST",
121 };
122
123 /**
124  * DOC: pcie_replay_count
125  *
126  * The amdgpu driver provides a sysfs API for reporting the total number
127  * of PCIe replays (NAKs).
128  * The file pcie_replay_count is used for this and returns the total
129  * number of replays as the sum of the NAKs generated and the NAKs received.
130  */
131
132 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
133                 struct device_attribute *attr, char *buf)
134 {
135         struct drm_device *ddev = dev_get_drvdata(dev);
136         struct amdgpu_device *adev = drm_to_adev(ddev);
137         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
138
139         return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
140 }
141
142 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
143                 amdgpu_device_get_pcie_replay_count, NULL);
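/*
 * Illustrative userspace sketch (not part of this file): reading the
 * pcie_replay_count attribute.  The sysfs path below is an assumption
 * and depends on which card index the device gets.
 *
 *   FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *   unsigned long long replays = 0;
 *
 *   if (f) {
 *           fscanf(f, "%llu", &replays);
 *           fclose(f);
 *   }
 */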
144
145 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
146
147 /**
148  * DOC: product_name
149  *
150  * The amdgpu driver provides a sysfs API for reporting the product name
151  * for the device.
152  * The file product_name is used for this and returns the product name
153  * as returned from the FRU.
154  * NOTE: This is only available for certain server cards
155  */
156
157 static ssize_t amdgpu_device_get_product_name(struct device *dev,
158                 struct device_attribute *attr, char *buf)
159 {
160         struct drm_device *ddev = dev_get_drvdata(dev);
161         struct amdgpu_device *adev = drm_to_adev(ddev);
162
163         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
164 }
165
166 static DEVICE_ATTR(product_name, S_IRUGO,
167                 amdgpu_device_get_product_name, NULL);
168
169 /**
170  * DOC: product_number
171  *
172  * The amdgpu driver provides a sysfs API for reporting the part number
173  * for the device.
174  * The file product_number is used for this and returns the part number
175  * as returned from the FRU.
176  * NOTE: This is only available for certain server cards
177  */
178
179 static ssize_t amdgpu_device_get_product_number(struct device *dev,
180                 struct device_attribute *attr, char *buf)
181 {
182         struct drm_device *ddev = dev_get_drvdata(dev);
183         struct amdgpu_device *adev = drm_to_adev(ddev);
184
185         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
186 }
187
188 static DEVICE_ATTR(product_number, S_IRUGO,
189                 amdgpu_device_get_product_number, NULL);
190
191 /**
192  * DOC: serial_number
193  *
194  * The amdgpu driver provides a sysfs API for reporting the serial number
195  * for the device.
196  * The file serial_number is used for this and returns the serial number
197  * as returned from the FRU.
198  * NOTE: This is only available for certain server cards
199  */
200
201 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
202                 struct device_attribute *attr, char *buf)
203 {
204         struct drm_device *ddev = dev_get_drvdata(dev);
205         struct amdgpu_device *adev = drm_to_adev(ddev);
206
207         return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
208 }
209
210 static DEVICE_ATTR(serial_number, S_IRUGO,
211                 amdgpu_device_get_serial_number, NULL);
212
213 /**
214  * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
215  *
216  * @dev: drm_device pointer
217  *
218  * Returns true if the device is a dGPU with HG/PX power control,
219  * otherwise return false.
220  */
221 bool amdgpu_device_supports_atpx(struct drm_device *dev)
222 {
223         struct amdgpu_device *adev = drm_to_adev(dev);
224
225         if (adev->flags & AMD_IS_PX)
226                 return true;
227         return false;
228 }
229
230 /**
231  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
232  *
233  * @dev: drm_device pointer
234  *
235  * Returns true if the device is a dGPU with ACPI power resources (PR3),
236  * otherwise return false.
237  */
238 bool amdgpu_device_supports_boco(struct drm_device *dev)
239 {
240         struct amdgpu_device *adev = drm_to_adev(dev);
241
242         if (adev->has_pr3)
243                 return true;
244         return false;
245 }
246
247 /**
248  * amdgpu_device_supports_baco - Does the device support BACO
249  *
250  * @dev: drm_device pointer
251  *
252  * Returns true if the device supports BACO,
253  * otherwise return false.
254  */
255 bool amdgpu_device_supports_baco(struct drm_device *dev)
256 {
257         struct amdgpu_device *adev = drm_to_adev(dev);
258
259         return amdgpu_asic_supports_baco(adev);
260 }
261
262 /*
263  * VRAM access helper functions
264  */
265
266 /**
267  * amdgpu_device_vram_access - read/write a buffer in vram
268  *
269  * @adev: amdgpu_device pointer
270  * @pos: offset of the buffer in vram
271  * @buf: virtual address of the buffer in system memory
272  * @size: read/write size; the buffer at @buf must hold at least @size bytes
273  * @write: true - write to vram, otherwise - read from vram
274  */
275 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
276                                uint32_t *buf, size_t size, bool write)
277 {
278         unsigned long flags;
279         uint32_t hi = ~0;
280         uint64_t last;
281
282
283 #ifdef CONFIG_64BIT
284         last = min(pos + size, adev->gmc.visible_vram_size);
285         if (last > pos) {
286                 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
287                 size_t count = last - pos;
288
289                 if (write) {
290                         memcpy_toio(addr, buf, count);
291                         mb();
292                         amdgpu_asic_flush_hdp(adev, NULL);
293                 } else {
294                         amdgpu_asic_invalidate_hdp(adev, NULL);
295                         mb();
296                         memcpy_fromio(buf, addr, count);
297                 }
298
299                 if (count == size)
300                         return;
301
302                 pos += count;
303                 buf += count / 4;
304                 size -= count;
305         }
306 #endif
307
308         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
309         for (last = pos + size; pos < last; pos += 4) {
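                /* MM_INDEX takes the low 31 bits of the VRAM byte offset
                 * (bit 31 selects the VRAM aperture); MM_INDEX_HI holds the
                 * remaining upper bits and is only rewritten when they change.
                 */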
310                 uint32_t tmp = pos >> 31;
311
312                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
313                 if (tmp != hi) {
314                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
315                         hi = tmp;
316                 }
317                 if (write)
318                         WREG32_NO_KIQ(mmMM_DATA, *buf++);
319                 else
320                         *buf++ = RREG32_NO_KIQ(mmMM_DATA);
321         }
322         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
323 }
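/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * copy the first 4KB of VRAM into a system memory buffer.
 *
 *   uint32_t buf[1024];
 *
 *   amdgpu_device_vram_access(adev, 0, buf, sizeof(buf), false);
 */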
324
325 /*
326  * register access helper functions.
327  */
328 /**
329  * amdgpu_device_rreg - read a memory mapped IO or indirect register
330  *
331  * @adev: amdgpu_device pointer
332  * @reg: dword aligned register offset
333  * @acc_flags: access flags which require special behavior
334  *
335  * Returns the 32 bit value from the offset specified.
336  */
337 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
338                             uint32_t reg, uint32_t acc_flags)
339 {
340         uint32_t ret;
341
342         if (adev->in_pci_err_recovery)
343                 return 0;
344
345         if ((reg * 4) < adev->rmmio_size) {
346                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
347                     amdgpu_sriov_runtime(adev) &&
348                     down_read_trylock(&adev->reset_sem)) {
349                         ret = amdgpu_kiq_rreg(adev, reg);
350                         up_read(&adev->reset_sem);
351                 } else {
352                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
353                 }
354         } else {
355                 ret = adev->pcie_rreg(adev, reg * 4);
356         }
357
358         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
359
360         return ret;
361 }
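/*
 * Most callers go through the RREG32()/WREG32() style macros, which are
 * assumed here to expand to these helpers, e.g. (sketch; mmEXAMPLE_REG is
 * a placeholder):
 *
 *   u32 val = RREG32(mmEXAMPLE_REG);
 */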
362
363 /*
364  * MMIO byte-wide register read helper function
365  * @offset: byte offset from MMIO start
366  *
367 */
368
369 /**
370  * amdgpu_mm_rreg8 - read a memory mapped IO register
371  *
372  * @adev: amdgpu_device pointer
373  * @offset: byte aligned register offset
374  *
375  * Returns the 8 bit value from the offset specified.
376  */
377 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
378 {
379         if (adev->in_pci_err_recovery)
380                 return 0;
381
382         if (offset < adev->rmmio_size)
383                 return (readb(adev->rmmio + offset));
384         BUG();
385 }
386
387 /*
388  * MMIO byte-wide register write helper function
389  * @offset: byte offset from MMIO start
390  * @value: the value to be written to the register
391  *
392 */
393 /**
394  * amdgpu_mm_wreg8 - write a memory mapped IO register
395  *
396  * @adev: amdgpu_device pointer
397  * @offset: byte aligned register offset
398  * @value: 8 bit value to write
399  *
400  * Writes the value specified to the offset specified.
401  */
402 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
403 {
404         if (adev->in_pci_err_recovery)
405                 return;
406
407         if (offset < adev->rmmio_size)
408                 writeb(value, adev->rmmio + offset);
409         else
410                 BUG();
411 }
412
413 /**
414  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
415  *
416  * @adev: amdgpu_device pointer
417  * @reg: dword aligned register offset
418  * @v: 32 bit value to write to the register
419  * @acc_flags: access flags which require special behavior
420  *
421  * Writes the value specified to the offset specified.
422  */
423 void amdgpu_device_wreg(struct amdgpu_device *adev,
424                         uint32_t reg, uint32_t v,
425                         uint32_t acc_flags)
426 {
427         if (adev->in_pci_err_recovery)
428                 return;
429
430         if ((reg * 4) < adev->rmmio_size) {
431                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
432                     amdgpu_sriov_runtime(adev) &&
433                     down_read_trylock(&adev->reset_sem)) {
434                         amdgpu_kiq_wreg(adev, reg, v);
435                         up_read(&adev->reset_sem);
436                 } else {
437                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
438                 }
439         } else {
440                 adev->pcie_wreg(adev, reg * 4, v);
441         }
442
443         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
444 }
445
446 /*
447  * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
448  *
449  * This function is invoked only for the debugfs register access.
450  */
451 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
452                              uint32_t reg, uint32_t v)
453 {
454         if (adev->in_pci_err_recovery)
455                 return;
456
457         if (amdgpu_sriov_fullaccess(adev) &&
458             adev->gfx.rlc.funcs &&
459             adev->gfx.rlc.funcs->is_rlcg_access_range) {
460                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
461                         return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
462         } else {
463                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
464         }
465 }
466
467 /**
468  * amdgpu_io_rreg - read an IO register
469  *
470  * @adev: amdgpu_device pointer
471  * @reg: dword aligned register offset
472  *
473  * Returns the 32 bit value from the offset specified.
474  */
475 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
476 {
477         if (adev->in_pci_err_recovery)
478                 return 0;
479
480         if ((reg * 4) < adev->rio_mem_size)
481                 return ioread32(adev->rio_mem + (reg * 4));
482         else {
483                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
484                 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
485         }
486 }
487
488 /**
489  * amdgpu_io_wreg - write to an IO register
490  *
491  * @adev: amdgpu_device pointer
492  * @reg: dword aligned register offset
493  * @v: 32 bit value to write to the register
494  *
495  * Writes the value specified to the offset specified.
496  */
497 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
498 {
499         if (adev->in_pci_err_recovery)
500                 return;
501
502         if ((reg * 4) < adev->rio_mem_size)
503                 iowrite32(v, adev->rio_mem + (reg * 4));
504         else {
505                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
506                 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
507         }
508 }
509
510 /**
511  * amdgpu_mm_rdoorbell - read a doorbell dword
512  *
513  * @adev: amdgpu_device pointer
514  * @index: doorbell index
515  *
516  * Returns the value in the doorbell aperture at the
517  * requested doorbell index (CIK).
518  */
519 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
520 {
521         if (adev->in_pci_err_recovery)
522                 return 0;
523
524         if (index < adev->doorbell.num_doorbells) {
525                 return readl(adev->doorbell.ptr + index);
526         } else {
527                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
528                 return 0;
529         }
530 }
531
532 /**
533  * amdgpu_mm_wdoorbell - write a doorbell dword
534  *
535  * @adev: amdgpu_device pointer
536  * @index: doorbell index
537  * @v: value to write
538  *
539  * Writes @v to the doorbell aperture at the
540  * requested doorbell index (CIK).
541  */
542 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
543 {
544         if (adev->in_pci_err_recovery)
545                 return;
546
547         if (index < adev->doorbell.num_doorbells) {
548                 writel(v, adev->doorbell.ptr + index);
549         } else {
550                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
551         }
552 }
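/*
 * Illustrative sketch (assumed caller): a ring typically commits its write
 * pointer by writing it to the ring's doorbell slot, e.g.
 *
 *   amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *                       lower_32_bits(ring->wptr));
 */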
553
554 /**
555  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
556  *
557  * @adev: amdgpu_device pointer
558  * @index: doorbell index
559  *
560  * Returns the value in the doorbell aperture at the
561  * requested doorbell index (VEGA10+).
562  */
563 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
564 {
565         if (adev->in_pci_err_recovery)
566                 return 0;
567
568         if (index < adev->doorbell.num_doorbells) {
569                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
570         } else {
571                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
572                 return 0;
573         }
574 }
575
576 /**
577  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
578  *
579  * @adev: amdgpu_device pointer
580  * @index: doorbell index
581  * @v: value to write
582  *
583  * Writes @v to the doorbell aperture at the
584  * requested doorbell index (VEGA10+).
585  */
586 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
587 {
588         if (adev->in_pci_err_recovery)
589                 return;
590
591         if (index < adev->doorbell.num_doorbells) {
592                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
593         } else {
594                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
595         }
596 }
597
598 /**
599  * amdgpu_device_indirect_rreg - read an indirect register
600  *
601  * @adev: amdgpu_device pointer
602  * @pcie_index: mmio register offset
603  * @pcie_data: mmio register offset
604  * @reg_addr: indirect register address to read from
605  *
606  * Returns the value of indirect register @reg_addr
607  */
608 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
609                                 u32 pcie_index, u32 pcie_data,
610                                 u32 reg_addr)
611 {
612         unsigned long flags;
613         u32 r;
614         void __iomem *pcie_index_offset;
615         void __iomem *pcie_data_offset;
616
617         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
618         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
619         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
620
621         writel(reg_addr, pcie_index_offset);
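        /* read back the index write to flush it out of the posted-write
         * buffer before reading the data register
         */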
622         readl(pcie_index_offset);
623         r = readl(pcie_data_offset);
624         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
625
626         return r;
627 }
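/*
 * Illustrative sketch (hypothetical register names): an asic typically wraps
 * the indirect helpers as its pcie_rreg/pcie_wreg callbacks, e.g.
 *
 *   static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *   {
 *           return amdgpu_device_indirect_rreg(adev, mmPCIE_INDEX2,
 *                                              mmPCIE_DATA2, reg);
 *   }
 *
 * mmPCIE_INDEX2/mmPCIE_DATA2 are assumptions; the real index/data offsets
 * are asic specific.
 */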
628
629 /**
630  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
631  *
632  * @adev: amdgpu_device pointer
633  * @pcie_index: mmio register offset
634  * @pcie_data: mmio register offset
635  * @reg_addr: indirect register address to read from
636  *
637  * Returns the value of indirect register @reg_addr
638  */
639 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
640                                   u32 pcie_index, u32 pcie_data,
641                                   u32 reg_addr)
642 {
643         unsigned long flags;
644         u64 r;
645         void __iomem *pcie_index_offset;
646         void __iomem *pcie_data_offset;
647
648         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
649         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
650         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
651
652         /* read low 32 bits */
653         writel(reg_addr, pcie_index_offset);
654         readl(pcie_index_offset);
655         r = readl(pcie_data_offset);
656         /* read high 32 bits */
657         writel(reg_addr + 4, pcie_index_offset);
658         readl(pcie_index_offset);
659         r |= ((u64)readl(pcie_data_offset) << 32);
660         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
661
662         return r;
663 }
664
665 /**
666  * amdgpu_device_indirect_wreg - write an indirect register address
667  *
668  * @adev: amdgpu_device pointer
669  * @pcie_index: mmio register offset
670  * @pcie_data: mmio register offset
671  * @reg_addr: indirect register offset
672  * @reg_data: indirect register data
673  *
674  */
675 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
676                                  u32 pcie_index, u32 pcie_data,
677                                  u32 reg_addr, u32 reg_data)
678 {
679         unsigned long flags;
680         void __iomem *pcie_index_offset;
681         void __iomem *pcie_data_offset;
682
683         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
684         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
685         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
686
687         writel(reg_addr, pcie_index_offset);
688         readl(pcie_index_offset);
689         writel(reg_data, pcie_data_offset);
690         readl(pcie_data_offset);
691         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
692 }
693
694 /**
695  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
696  *
697  * @adev: amdgpu_device pointer
698  * @pcie_index: mmio register offset
699  * @pcie_data: mmio register offset
700  * @reg_addr: indirect register offset
701  * @reg_data: indirect register data
702  *
703  */
704 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
705                                    u32 pcie_index, u32 pcie_data,
706                                    u32 reg_addr, u64 reg_data)
707 {
708         unsigned long flags;
709         void __iomem *pcie_index_offset;
710         void __iomem *pcie_data_offset;
711
712         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
713         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
714         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
715
716         /* write low 32 bits */
717         writel(reg_addr, pcie_index_offset);
718         readl(pcie_index_offset);
719         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
720         readl(pcie_data_offset);
721         /* write high 32 bits */
722         writel(reg_addr + 4, pcie_index_offset);
723         readl(pcie_index_offset);
724         writel((u32)(reg_data >> 32), pcie_data_offset);
725         readl(pcie_data_offset);
726         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
727 }
728
729 /**
730  * amdgpu_invalid_rreg - dummy reg read function
731  *
732  * @adev: amdgpu_device pointer
733  * @reg: offset of register
734  *
735  * Dummy register read function.  Used for register blocks
736  * that certain asics don't have (all asics).
737  * Returns the value in the register.
738  */
739 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
740 {
741         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
742         BUG();
743         return 0;
744 }
745
746 /**
747  * amdgpu_invalid_wreg - dummy reg write function
748  *
749  * @adev: amdgpu_device pointer
750  * @reg: offset of register
751  * @v: value to write to the register
752  *
753  * Dummy register write function.  Used for register blocks
754  * that certain asics don't have (all asics).
755  */
756 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
757 {
758         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
759                   reg, v);
760         BUG();
761 }
762
763 /**
764  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
765  *
766  * @adev: amdgpu_device pointer
767  * @reg: offset of register
768  *
769  * Dummy register read function.  Used for register blocks
770  * that certain asics don't have (all asics).
771  * Returns the value in the register.
772  */
773 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
774 {
775         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
776         BUG();
777         return 0;
778 }
779
780 /**
781  * amdgpu_invalid_wreg64 - dummy reg write function
782  *
783  * @adev: amdgpu_device pointer
784  * @reg: offset of register
785  * @v: value to write to the register
786  *
787  * Dummy register write function.  Used for register blocks
788  * that certain asics don't have (all asics).
789  */
790 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
791 {
792         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
793                   reg, v);
794         BUG();
795 }
796
797 /**
798  * amdgpu_block_invalid_rreg - dummy reg read function
799  *
800  * @adev: amdgpu_device pointer
801  * @block: offset of instance
802  * @reg: offset of register
803  *
804  * Dummy register read function.  Used for register blocks
805  * that certain asics don't have (all asics).
806  * Returns the value in the register.
807  */
808 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
809                                           uint32_t block, uint32_t reg)
810 {
811         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
812                   reg, block);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_block_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @block: offset of instance
822  * @reg: offset of register
823  * @v: value to write to the register
824  *
825  * Dummy register write function.  Used for register blocks
826  * that certain asics don't have (all asics).
827  */
828 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
829                                       uint32_t block,
830                                       uint32_t reg, uint32_t v)
831 {
832         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
833                   reg, block, v);
834         BUG();
835 }
836
837 /**
838  * amdgpu_device_asic_init - Wrapper for atom asic_init
839  *
840  * @adev: amdgpu_device pointer
841  *
842  * Does any asic specific work and then calls atom asic init.
843  */
844 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
845 {
846         amdgpu_asic_pre_asic_init(adev);
847
848         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
849 }
850
851 /**
852  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
853  *
854  * @adev: amdgpu_device pointer
855  *
856  * Allocates a scratch page of VRAM for use by various things in the
857  * driver.
858  */
859 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
860 {
861         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
862                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
863                                        &adev->vram_scratch.robj,
864                                        &adev->vram_scratch.gpu_addr,
865                                        (void **)&adev->vram_scratch.ptr);
866 }
867
868 /**
869  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
870  *
871  * @adev: amdgpu_device pointer
872  *
873  * Frees the VRAM scratch page.
874  */
875 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
876 {
877         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
878 }
879
880 /**
881  * amdgpu_device_program_register_sequence - program an array of registers.
882  *
883  * @adev: amdgpu_device pointer
884  * @registers: pointer to the register array
885  * @array_size: size of the register array
886  *
887  * Programs an array of registers with AND and OR masks.
888  * This is a helper for setting golden registers.
889  */
890 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
891                                              const u32 *registers,
892                                              const u32 array_size)
893 {
894         u32 tmp, reg, and_mask, or_mask;
895         int i;
896
897         if (array_size % 3)
898                 return;
899
900         for (i = 0; i < array_size; i += 3) {
901                 reg = registers[i + 0];
902                 and_mask = registers[i + 1];
903                 or_mask = registers[i + 2];
904
905                 if (and_mask == 0xffffffff) {
906                         tmp = or_mask;
907                 } else {
908                         tmp = RREG32(reg);
909                         tmp &= ~and_mask;
910                         if (adev->family >= AMDGPU_FAMILY_AI)
911                                 tmp |= (or_mask & and_mask);
912                         else
913                                 tmp |= or_mask;
914                 }
915                 WREG32(reg, tmp);
916         }
917 }
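/*
 * Illustrative sketch (placeholder values): golden register arrays are flat
 * (reg, and_mask, or_mask) triplets, e.g.
 *
 *   static const u32 golden_settings_example[] = {
 *           mmEXAMPLE_REG, 0x0000ffff, 0x00000100,
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                           ARRAY_SIZE(golden_settings_example));
 *
 * mmEXAMPLE_REG and the masks above are hypothetical.
 */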
918
919 /**
920  * amdgpu_device_pci_config_reset - reset the GPU
921  *
922  * @adev: amdgpu_device pointer
923  *
924  * Resets the GPU using the pci config reset sequence.
925  * Only applicable to asics prior to vega10.
926  */
927 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
928 {
929         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
930 }
931
932 /**
933  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
934  *
935  * @adev: amdgpu_device pointer
936  *
937  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
938  */
939 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
940 {
941         return pci_reset_function(adev->pdev);
942 }
943
944 /*
945  * GPU doorbell aperture helpers function.
946  */
947 /**
948  * amdgpu_device_doorbell_init - Init doorbell driver information.
949  *
950  * @adev: amdgpu_device pointer
951  *
952  * Init doorbell driver information (CIK)
953  * Returns 0 on success, error on failure.
954  */
955 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
956 {
957
958         /* No doorbell on SI hardware generation */
959         if (adev->asic_type < CHIP_BONAIRE) {
960                 adev->doorbell.base = 0;
961                 adev->doorbell.size = 0;
962                 adev->doorbell.num_doorbells = 0;
963                 adev->doorbell.ptr = NULL;
964                 return 0;
965         }
966
967         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
968                 return -EINVAL;
969
970         amdgpu_asic_init_doorbell_index(adev);
971
972         /* doorbell bar mapping */
973         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
974         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
975
976         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
977                                              adev->doorbell_index.max_assignment+1);
978         if (adev->doorbell.num_doorbells == 0)
979                 return -EINVAL;
980
981         /* For Vega, reserve and map two pages on doorbell BAR since SDMA
982          * paging queue doorbells use the second page. The
983          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
984          * doorbells are in the first page. So with paging queue enabled,
985          * the max num_doorbells should grow by one page (0x400 in dwords)
986          */
987         if (adev->asic_type >= CHIP_VEGA10)
988                 adev->doorbell.num_doorbells += 0x400;
989
990         adev->doorbell.ptr = ioremap(adev->doorbell.base,
991                                      adev->doorbell.num_doorbells *
992                                      sizeof(u32));
993         if (adev->doorbell.ptr == NULL)
994                 return -ENOMEM;
995
996         return 0;
997 }
998
999 /**
1000  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1001  *
1002  * @adev: amdgpu_device pointer
1003  *
1004  * Tear down doorbell driver information (CIK)
1005  */
1006 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1007 {
1008         iounmap(adev->doorbell.ptr);
1009         adev->doorbell.ptr = NULL;
1010 }
1011
1012
1013
1014 /*
1015  * amdgpu_device_wb_*()
1016  * Writeback is the method by which the GPU updates special pages in memory
1017  * with the status of certain GPU events (fences, ring pointers, etc.).
1018  */
1019
1020 /**
1021  * amdgpu_device_wb_fini - Disable Writeback and free memory
1022  *
1023  * @adev: amdgpu_device pointer
1024  *
1025  * Disables Writeback and frees the Writeback memory (all asics).
1026  * Used at driver shutdown.
1027  */
1028 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1029 {
1030         if (adev->wb.wb_obj) {
1031                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1032                                       &adev->wb.gpu_addr,
1033                                       (void **)&adev->wb.wb);
1034                 adev->wb.wb_obj = NULL;
1035         }
1036 }
1037
1038 /**
1039  * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1040  *
1041  * @adev: amdgpu_device pointer
1042  *
1043  * Initializes writeback and allocates writeback memory (all asics).
1044  * Used at driver startup.
1045  * Returns 0 on success or a negative error code on failure.
1046  */
1047 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1048 {
1049         int r;
1050
1051         if (adev->wb.wb_obj == NULL) {
1052                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1053                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1054                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1055                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1056                                             (void **)&adev->wb.wb);
1057                 if (r) {
1058                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1059                         return r;
1060                 }
1061
1062                 adev->wb.num_wb = AMDGPU_MAX_WB;
1063                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1064
1065                 /* clear wb memory */
1066                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1067         }
1068
1069         return 0;
1070 }
1071
1072 /**
1073  * amdgpu_device_wb_get - Allocate a wb entry
1074  *
1075  * @adev: amdgpu_device pointer
1076  * @wb: wb index
1077  *
1078  * Allocate a wb slot for use by the driver (all asics).
1079  * Returns 0 on success or -EINVAL on failure.
1080  */
1081 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1082 {
1083         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1084
1085         if (offset < adev->wb.num_wb) {
1086                 __set_bit(offset, adev->wb.used);
1087                 *wb = offset << 3; /* convert to dw offset */
1088                 return 0;
1089         } else {
1090                 return -EINVAL;
1091         }
1092 }
1093
1094 /**
1095  * amdgpu_device_wb_free - Free a wb entry
1096  *
1097  * @adev: amdgpu_device pointer
1098  * @wb: wb index
1099  *
1100  * Free a wb slot allocated for use by the driver (all asics)
1101  */
1102 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1103 {
1104         wb >>= 3;
1105         if (wb < adev->wb.num_wb)
1106                 __clear_bit(wb, adev->wb.used);
1107 }
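/*
 * Illustrative usage sketch (assumed caller): allocate a wb slot, derive its
 * GPU and CPU addresses from the dword offset, and free it on teardown.
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *           volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *
 *           ... use the slot ...
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */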
1108
1109 /**
1110  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1111  *
1112  * @adev: amdgpu_device pointer
1113  *
1114  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1115  * to fail, but if any of the BARs is not accessible after the resize we abort
1116  * driver loading by returning -ENODEV.
1117  */
1118 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1119 {
1120         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1121         struct pci_bus *root;
1122         struct resource *res;
1123         unsigned i;
1124         u16 cmd;
1125         int r;
1126
1127         /* Bypass for VF */
1128         if (amdgpu_sriov_vf(adev))
1129                 return 0;
1130
1131         /* skip if the bios has already enabled large BAR */
1132         if (adev->gmc.real_vram_size &&
1133             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1134                 return 0;
1135
1136         /* Check if the root BUS has 64bit memory resources */
1137         root = adev->pdev->bus;
1138         while (root->parent)
1139                 root = root->parent;
1140
1141         pci_bus_for_each_resource(root, res, i) {
1142                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1143                     res->start > 0x100000000ull)
1144                         break;
1145         }
1146
1147         /* Trying to resize is pointless without a root hub window above 4GB */
1148         if (!res)
1149                 return 0;
1150
1151         /* Limit the BAR size to what is available */
1152         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1153                         rbar_size);
1154
1155         /* Disable memory decoding while we change the BAR addresses and size */
1156         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1157         pci_write_config_word(adev->pdev, PCI_COMMAND,
1158                               cmd & ~PCI_COMMAND_MEMORY);
1159
1160         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1161         amdgpu_device_doorbell_fini(adev);
1162         if (adev->asic_type >= CHIP_BONAIRE)
1163                 pci_release_resource(adev->pdev, 2);
1164
1165         pci_release_resource(adev->pdev, 0);
1166
1167         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1168         if (r == -ENOSPC)
1169                 DRM_INFO("Not enough PCI address space for a large BAR.");
1170         else if (r && r != -ENOTSUPP)
1171                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1172
1173         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1174
1175         /* When the doorbell or fb BAR isn't available we have no chance of
1176          * using the device.
1177          */
1178         r = amdgpu_device_doorbell_init(adev);
1179         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1180                 return -ENODEV;
1181
1182         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1183
1184         return 0;
1185 }
1186
1187 /*
1188  * GPU helpers function.
1189  */
1190 /**
1191  * amdgpu_device_need_post - check if the hw need post or not
1192  *
1193  * @adev: amdgpu_device pointer
1194  *
1195  * Check if the asic has been initialized (all asics) at driver startup,
1196  * or if post is needed because a hw reset was performed.
1197  * Returns true if post is needed, false if not.
1198  */
1199 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1200 {
1201         uint32_t reg;
1202
1203         if (amdgpu_sriov_vf(adev))
1204                 return false;
1205
1206         if (amdgpu_passthrough(adev)) {
1207                 /* for FIJI: In the whole-GPU pass-through virtualization case, after VM
1208                  * reboot some old SMC firmware still needs the driver to do a vPost,
1209                  * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1210                  * this flaw, so we force vPost for SMC versions below 22.15.
1211                  */
1212                 if (adev->asic_type == CHIP_FIJI) {
1213                         int err;
1214                         uint32_t fw_ver;
1215                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1216                         /* force vPost if error occurred */
1217                         if (err)
1218                                 return true;
1219
1220                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1221                         if (fw_ver < 0x00160e00)
1222                                 return true;
1223                 }
1224         }
1225
1226         if (adev->has_hw_reset) {
1227                 adev->has_hw_reset = false;
1228                 return true;
1229         }
1230
1231         /* bios scratch used on CIK+ */
1232         if (adev->asic_type >= CHIP_BONAIRE)
1233                 return amdgpu_atombios_scratch_need_asic_init(adev);
1234
1235         /* check MEM_SIZE for older asics */
1236         reg = amdgpu_asic_get_config_memsize(adev);
1237
1238         if ((reg != 0) && (reg != 0xffffffff))
1239                 return false;
1240
1241         return true;
1242 }
1243
1244 /* if we get transitioned to only one device, take VGA back */
1245 /**
1246  * amdgpu_device_vga_set_decode - enable/disable vga decode
1247  *
1248  * @cookie: amdgpu_device pointer
1249  * @state: enable/disable vga decode
1250  *
1251  * Enable/disable vga decode (all asics).
1252  * Returns VGA resource flags.
1253  */
1254 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1255 {
1256         struct amdgpu_device *adev = cookie;
1257         amdgpu_asic_set_vga_state(adev, state);
1258         if (state)
1259                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1260                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1261         else
1262                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1263 }
1264
1265 /**
1266  * amdgpu_device_check_block_size - validate the vm block size
1267  *
1268  * @adev: amdgpu_device pointer
1269  *
1270  * Validates the vm block size specified via module parameter.
1271  * The vm block size defines number of bits in page table versus page directory,
1272  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1273  * page table and the remaining bits are in the page directory.
1274  */
1275 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1276 {
1277         /* defines number of bits in page table versus page directory,
1278          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1279          * page table and the remaining bits are in the page directory */
1280         if (amdgpu_vm_block_size == -1)
1281                 return;
1282
1283         if (amdgpu_vm_block_size < 9) {
1284                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1285                          amdgpu_vm_block_size);
1286                 amdgpu_vm_block_size = -1;
1287         }
1288 }
1289
1290 /**
1291  * amdgpu_device_check_vm_size - validate the vm size
1292  *
1293  * @adev: amdgpu_device pointer
1294  *
1295  * Validates the vm size in GB specified via module parameter.
1296  * The VM size is the size of the GPU virtual memory space in GB.
1297  */
1298 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1299 {
1300         /* no need to check the default value */
1301         if (amdgpu_vm_size == -1)
1302                 return;
1303
1304         if (amdgpu_vm_size < 1) {
1305                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1306                          amdgpu_vm_size);
1307                 amdgpu_vm_size = -1;
1308         }
1309 }
1310
1311 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1312 {
1313         struct sysinfo si;
1314         bool is_os_64 = (sizeof(void *) == 8);
1315         uint64_t total_memory;
1316         uint64_t dram_size_seven_GB = 0x1B8000000;
1317         uint64_t dram_size_three_GB = 0xB8000000;
1318
1319         if (amdgpu_smu_memory_pool_size == 0)
1320                 return;
1321
1322         if (!is_os_64) {
1323                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1324                 goto def_value;
1325         }
1326         si_meminfo(&si);
1327         total_memory = (uint64_t)si.totalram * si.mem_unit;
1328
1329         if ((amdgpu_smu_memory_pool_size == 1) ||
1330                 (amdgpu_smu_memory_pool_size == 2)) {
1331                 if (total_memory < dram_size_three_GB)
1332                         goto def_value1;
1333         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1334                 (amdgpu_smu_memory_pool_size == 8)) {
1335                 if (total_memory < dram_size_seven_GB)
1336                         goto def_value1;
1337         } else {
1338                 DRM_WARN("Smu memory pool size not supported\n");
1339                 goto def_value;
1340         }
1341         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1342
1343         return;
1344
1345 def_value1:
1346         DRM_WARN("No enough system memory\n");
1347 def_value:
1348         adev->pm.smu_prv_buffer_size = 0;
1349 }
1350
1351 /**
1352  * amdgpu_device_check_arguments - validate module params
1353  *
1354  * @adev: amdgpu_device pointer
1355  *
1356  * Validates certain module parameters and updates
1357  * the associated values used by the driver (all asics).
1358  */
1359 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1360 {
1361         if (amdgpu_sched_jobs < 4) {
1362                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1363                          amdgpu_sched_jobs);
1364                 amdgpu_sched_jobs = 4;
1365         } else if (!is_power_of_2(amdgpu_sched_jobs)){
1366                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1367                          amdgpu_sched_jobs);
1368                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1369         }
1370
1371         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1372                 /* gart size must be greater than or equal to 32M */
1373                 dev_warn(adev->dev, "gart size (%d) too small\n",
1374                          amdgpu_gart_size);
1375                 amdgpu_gart_size = -1;
1376         }
1377
1378         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1379                 /* gtt size must be greater than or equal to 32M */
1380                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1381                                  amdgpu_gtt_size);
1382                 amdgpu_gtt_size = -1;
1383         }
1384
1385         /* valid range is between 4 and 9 inclusive */
1386         if (amdgpu_vm_fragment_size != -1 &&
1387             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1388                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1389                 amdgpu_vm_fragment_size = -1;
1390         }
1391
1392         if (amdgpu_sched_hw_submission < 2) {
1393                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1394                          amdgpu_sched_hw_submission);
1395                 amdgpu_sched_hw_submission = 2;
1396         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1397                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1398                          amdgpu_sched_hw_submission);
1399                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1400         }
1401
1402         amdgpu_device_check_smu_prv_buffer_size(adev);
1403
1404         amdgpu_device_check_vm_size(adev);
1405
1406         amdgpu_device_check_block_size(adev);
1407
1408         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1409
1410         amdgpu_gmc_tmz_set(adev);
1411
1412         amdgpu_gmc_noretry_set(adev);
1413
1414         return 0;
1415 }
1416
1417 /**
1418  * amdgpu_switcheroo_set_state - set switcheroo state
1419  *
1420  * @pdev: pci dev pointer
1421  * @state: vga_switcheroo state
1422  *
1423  * Callback for the switcheroo driver.  Suspends or resumes the
1424  * asics before or after they are powered up using ACPI methods.
1425  */
1426 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1427                                         enum vga_switcheroo_state state)
1428 {
1429         struct drm_device *dev = pci_get_drvdata(pdev);
1430         int r;
1431
1432         if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
1433                 return;
1434
1435         if (state == VGA_SWITCHEROO_ON) {
1436                 pr_info("switched on\n");
1437                 /* don't suspend or resume card normally */
1438                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1439
1440                 pci_set_power_state(pdev, PCI_D0);
1441                 amdgpu_device_load_pci_state(pdev);
1442                 r = pci_enable_device(pdev);
1443                 if (r)
1444                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1445                 amdgpu_device_resume(dev, true);
1446
1447                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1448         } else {
1449                 pr_info("switched off\n");
1450                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1451                 amdgpu_device_suspend(dev, true);
1452                 amdgpu_device_cache_pci_state(pdev);
1453                 /* Shut down the device */
1454                 pci_disable_device(pdev);
1455                 pci_set_power_state(pdev, PCI_D3cold);
1456                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1457         }
1458 }
1459
1460 /**
1461  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1462  *
1463  * @pdev: pci dev pointer
1464  *
1465  * Callback for the switcheroo driver.  Check if the switcheroo
1466  * state can be changed.
1467  * Returns true if the state can be changed, false if not.
1468  */
1469 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1470 {
1471         struct drm_device *dev = pci_get_drvdata(pdev);
1472
1473         /*
1474         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1475         * locking inversion with the driver load path. And the access here is
1476         * completely racy anyway. So don't bother with locking for now.
1477         */
1478         return atomic_read(&dev->open_count) == 0;
1479 }
1480
1481 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1482         .set_gpu_state = amdgpu_switcheroo_set_state,
1483         .reprobe = NULL,
1484         .can_switch = amdgpu_switcheroo_can_switch,
1485 };
1486
1487 /**
1488  * amdgpu_device_ip_set_clockgating_state - set the CG state
1489  *
1490  * @dev: amdgpu_device pointer
1491  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1492  * @state: clockgating state (gate or ungate)
1493  *
1494  * Sets the requested clockgating state for all instances of
1495  * the hardware IP specified.
1496  * Returns the error code from the last instance.
1497  */
1498 int amdgpu_device_ip_set_clockgating_state(void *dev,
1499                                            enum amd_ip_block_type block_type,
1500                                            enum amd_clockgating_state state)
1501 {
1502         struct amdgpu_device *adev = dev;
1503         int i, r = 0;
1504
1505         for (i = 0; i < adev->num_ip_blocks; i++) {
1506                 if (!adev->ip_blocks[i].status.valid)
1507                         continue;
1508                 if (adev->ip_blocks[i].version->type != block_type)
1509                         continue;
1510                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1511                         continue;
1512                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1513                         (void *)adev, state);
1514                 if (r)
1515                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1516                                   adev->ip_blocks[i].version->funcs->name, r);
1517         }
1518         return r;
1519 }
1520
1521 /**
1522  * amdgpu_device_ip_set_powergating_state - set the PG state
1523  *
1524  * @dev: amdgpu_device pointer
1525  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1526  * @state: powergating state (gate or ungate)
1527  *
1528  * Sets the requested powergating state for all instances of
1529  * the hardware IP specified.
1530  * Returns the error code from the last instance.
1531  */
1532 int amdgpu_device_ip_set_powergating_state(void *dev,
1533                                            enum amd_ip_block_type block_type,
1534                                            enum amd_powergating_state state)
1535 {
1536         struct amdgpu_device *adev = dev;
1537         int i, r = 0;
1538
1539         for (i = 0; i < adev->num_ip_blocks; i++) {
1540                 if (!adev->ip_blocks[i].status.valid)
1541                         continue;
1542                 if (adev->ip_blocks[i].version->type != block_type)
1543                         continue;
1544                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1545                         continue;
1546                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1547                         (void *)adev, state);
1548                 if (r)
1549                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1550                                   adev->ip_blocks[i].version->funcs->name, r);
1551         }
1552         return r;
1553 }
1554
1555 /**
1556  * amdgpu_device_ip_get_clockgating_state - get the CG state
1557  *
1558  * @adev: amdgpu_device pointer
1559  * @flags: clockgating feature flags
1560  *
1561  * Walks the list of IPs on the device and updates the clockgating
1562  * flags for each IP.
1563  * Updates @flags with the feature flags for each hardware IP where
1564  * clockgating is enabled.
1565  */
1566 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1567                                             u32 *flags)
1568 {
1569         int i;
1570
1571         for (i = 0; i < adev->num_ip_blocks; i++) {
1572                 if (!adev->ip_blocks[i].status.valid)
1573                         continue;
1574                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1575                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1576         }
1577 }
1578
1579 /**
1580  * amdgpu_device_ip_wait_for_idle - wait for idle
1581  *
1582  * @adev: amdgpu_device pointer
1583  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1584  *
1585  * Waits for the requested hardware IP to be idle.
1586  * Returns 0 for success or a negative error code on failure.
1587  */
1588 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1589                                    enum amd_ip_block_type block_type)
1590 {
1591         int i, r;
1592
1593         for (i = 0; i < adev->num_ip_blocks; i++) {
1594                 if (!adev->ip_blocks[i].status.valid)
1595                         continue;
1596                 if (adev->ip_blocks[i].version->type == block_type) {
1597                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1598                         if (r)
1599                                 return r;
1600                         break;
1601                 }
1602         }
1603         return 0;
1604
1605 }
1606
1607 /**
1608  * amdgpu_device_ip_is_idle - is the hardware IP idle
1609  *
1610  * @adev: amdgpu_device pointer
1611  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1612  *
1613  * Check if the hardware IP is idle or not.
1614  * Returns true if the IP is idle, false if not.
1615  */
1616 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1617                               enum amd_ip_block_type block_type)
1618 {
1619         int i;
1620
1621         for (i = 0; i < adev->num_ip_blocks; i++) {
1622                 if (!adev->ip_blocks[i].status.valid)
1623                         continue;
1624                 if (adev->ip_blocks[i].version->type == block_type)
1625                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1626         }
1627         return true;
1628
1629 }
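/*
 * Editor's sketch (not in the original file): the two helpers above are
 * typically paired, checking for idleness first and blocking only when
 * necessary.  The wrapper name is hypothetical.
 */
static int example_quiesce_gfx(struct amdgpu_device *adev)
{
        if (amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
                return 0;

        return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
}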
1630
1631 /**
1632  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1633  *
1634  * @adev: amdgpu_device pointer
1635  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1636  *
1637  * Returns a pointer to the hardware IP block structure
1638  * if it exists for the asic, otherwise NULL.
1639  */
1640 struct amdgpu_ip_block *
1641 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1642                               enum amd_ip_block_type type)
1643 {
1644         int i;
1645
1646         for (i = 0; i < adev->num_ip_blocks; i++)
1647                 if (adev->ip_blocks[i].version->type == type)
1648                         return &adev->ip_blocks[i];
1649
1650         return NULL;
1651 }
1652
1653 /**
1654  * amdgpu_device_ip_block_version_cmp - check an IP block against a required version
1655  *
1656  * @adev: amdgpu_device pointer
1657  * @type: enum amd_ip_block_type
1658  * @major: major version
1659  * @minor: minor version
1660  *
1661  * Returns 0 if the IP block's version is equal to or greater than the
1662  * requested version, or 1 if it is smaller or the ip_block doesn't exist.
1663  */
1664 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1665                                        enum amd_ip_block_type type,
1666                                        u32 major, u32 minor)
1667 {
1668         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1669
1670         if (ip_block && ((ip_block->version->major > major) ||
1671                         ((ip_block->version->major == major) &&
1672                         (ip_block->version->minor >= minor))))
1673                 return 0;
1674
1675         return 1;
1676 }
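/*
 * Editor's sketch (not in the original file): gating a workaround on a
 * minimum IP version with the comparator above.  Note the inverted
 * return convention: 0 means "equal or newer".  The wrapper name is
 * hypothetical.
 */
static bool example_smc_at_least(struct amdgpu_device *adev, u32 major, u32 minor)
{
        return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
                                                  major, minor) == 0;
}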
1677
1678 /**
1679  * amdgpu_device_ip_block_add - add an IP block to the device
1680  *
1681  * @adev: amdgpu_device pointer
1682  * @ip_block_version: pointer to the IP to add
1683  *
1684  * Adds the IP block driver information to the collection of IPs
1685  * on the asic.
1686  */
1687 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1688                                const struct amdgpu_ip_block_version *ip_block_version)
1689 {
1690         if (!ip_block_version)
1691                 return -EINVAL;
1692
1693         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1694                   ip_block_version->funcs->name);
1695
1696         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1697
1698         return 0;
1699 }
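/*
 * Editor's sketch (not in the original file): the per-ASIC
 * *_set_ip_blocks() callbacks (si.c, vi.c, soc15.c, nv.c) call the
 * helper above once per block, in init order.  This abridged example
 * mirrors the start of vi_set_ip_blocks(); gmc_v8_0_ip_block would
 * normally come from gmc_v8_0.h.
 */
static int example_set_ip_blocks(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
        if (r)
                return r;

        return amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
}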
1700
1701 /**
1702  * amdgpu_device_enable_virtual_display - enable virtual display feature
1703  *
1704  * @adev: amdgpu_device pointer
1705  *
1706  * Enables the virtual display feature if the user has enabled it via
1707  * the module parameter virtual_display.  This feature provides a virtual
1708  * display hardware on headless boards or in virtualized environments.
1709  * This function parses and validates the configuration string specified by
1710  * the user and configures the virtual display configuration (number of
1711  * virtual connectors, crtcs, etc.) specified.
1712  */
1713 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1714 {
1715         adev->enable_virtual_display = false;
1716
1717         if (amdgpu_virtual_display) {
1718                 const char *pci_address_name = pci_name(adev->pdev);
1719                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1720
1721                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1722                 pciaddstr_tmp = pciaddstr;
1723                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1724                         pciaddname = strsep(&pciaddname_tmp, ",");
1725                         if (!strcmp("all", pciaddname)
1726                             || !strcmp(pci_address_name, pciaddname)) {
1727                                 long num_crtc;
1728                                 int res = -1;
1729
1730                                 adev->enable_virtual_display = true;
1731
1732                                 if (pciaddname_tmp)
1733                                         res = kstrtol(pciaddname_tmp, 10,
1734                                                       &num_crtc);
1735
1736                                 if (!res) {
1737                                         if (num_crtc < 1)
1738                                                 num_crtc = 1;
1739                                         if (num_crtc > 6)
1740                                                 num_crtc = 6;
1741                                         adev->mode_info.num_crtc = num_crtc;
1742                                 } else {
1743                                         adev->mode_info.num_crtc = 1;
1744                                 }
1745                                 break;
1746                         }
1747                 }
1748
1749                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1750                          amdgpu_virtual_display, pci_address_name,
1751                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1752
1753                 kfree(pciaddstr);
1754         }
1755 }
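/*
 * Editor's note (not in the original file): the accepted format of the
 * string parsed above is a semicolon-separated device list, each entry
 * a PCI address (or "all") optionally followed by a crtc count, e.g.
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2;0000:02:00.0
 *
 * enables two virtual crtcs on the first device and the default of one
 * on the second, while "all,4" would enable four on every device.
 */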
1756
1757 /**
1758  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1759  *
1760  * @adev: amdgpu_device pointer
1761  *
1762  * Parses the asic configuration parameters specified in the gpu info
1763  * firmware and makes them available to the driver for use in configuring
1764  * the asic.
1765  * Returns 0 on success, -EINVAL on failure.
1766  */
1767 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1768 {
1769         const char *chip_name;
1770         char fw_name[40];
1771         int err;
1772         const struct gpu_info_firmware_header_v1_0 *hdr;
1773
1774         adev->firmware.gpu_info_fw = NULL;
1775
1776         if (adev->mman.discovery_bin) {
1777                 amdgpu_discovery_get_gfx_info(adev);
1778
1779                 /*
1780                  * FIXME: The bounding box is still needed by Navi12, so
1781                  * temporarily read it from gpu_info firmware. Should be dropped
1782                  * when DAL no longer needs it.
1783                  */
1784                 if (adev->asic_type != CHIP_NAVI12)
1785                         return 0;
1786         }
1787
1788         switch (adev->asic_type) {
1789 #ifdef CONFIG_DRM_AMDGPU_SI
1790         case CHIP_VERDE:
1791         case CHIP_TAHITI:
1792         case CHIP_PITCAIRN:
1793         case CHIP_OLAND:
1794         case CHIP_HAINAN:
1795 #endif
1796 #ifdef CONFIG_DRM_AMDGPU_CIK
1797         case CHIP_BONAIRE:
1798         case CHIP_HAWAII:
1799         case CHIP_KAVERI:
1800         case CHIP_KABINI:
1801         case CHIP_MULLINS:
1802 #endif
1803         case CHIP_TOPAZ:
1804         case CHIP_TONGA:
1805         case CHIP_FIJI:
1806         case CHIP_POLARIS10:
1807         case CHIP_POLARIS11:
1808         case CHIP_POLARIS12:
1809         case CHIP_VEGAM:
1810         case CHIP_CARRIZO:
1811         case CHIP_STONEY:
1812         case CHIP_VEGA20:
1813         case CHIP_SIENNA_CICHLID:
1814         case CHIP_NAVY_FLOUNDER:
1815         case CHIP_DIMGREY_CAVEFISH:
1816         default:
1817                 return 0;
1818         case CHIP_VEGA10:
1819                 chip_name = "vega10";
1820                 break;
1821         case CHIP_VEGA12:
1822                 chip_name = "vega12";
1823                 break;
1824         case CHIP_RAVEN:
1825                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1826                         chip_name = "raven2";
1827                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1828                         chip_name = "picasso";
1829                 else
1830                         chip_name = "raven";
1831                 break;
1832         case CHIP_ARCTURUS:
1833                 chip_name = "arcturus";
1834                 break;
1835         case CHIP_RENOIR:
1836                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1837                         chip_name = "renoir";
1838                 else
1839                         chip_name = "green_sardine";
1840                 break;
1841         case CHIP_NAVI10:
1842                 chip_name = "navi10";
1843                 break;
1844         case CHIP_NAVI14:
1845                 chip_name = "navi14";
1846                 break;
1847         case CHIP_NAVI12:
1848                 chip_name = "navi12";
1849                 break;
1850         case CHIP_VANGOGH:
1851                 chip_name = "vangogh";
1852                 break;
1853         }
1854
1855         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1856         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1857         if (err) {
1858                 dev_err(adev->dev,
1859                         "Failed to load gpu_info firmware \"%s\"\n",
1860                         fw_name);
1861                 goto out;
1862         }
1863         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1864         if (err) {
1865                 dev_err(adev->dev,
1866                         "Failed to validate gpu_info firmware \"%s\"\n",
1867                         fw_name);
1868                 goto out;
1869         }
1870
1871         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1872         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1873
1874         switch (hdr->version_major) {
1875         case 1:
1876         {
1877                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1878                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1879                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1880
1881                 /*
1882                  * Should be dropped when DAL no longer needs it.
1883                  */
1884                 if (adev->asic_type == CHIP_NAVI12)
1885                         goto parse_soc_bounding_box;
1886
1887                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1888                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1889                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1890                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1891                 adev->gfx.config.max_texture_channel_caches =
1892                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1893                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1894                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1895                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1896                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1897                 adev->gfx.config.double_offchip_lds_buf =
1898                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1899                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1900                 adev->gfx.cu_info.max_waves_per_simd =
1901                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1902                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1903                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1904                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1905                 if (hdr->version_minor >= 1) {
1906                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1907                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1908                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1909                         adev->gfx.config.num_sc_per_sh =
1910                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1911                         adev->gfx.config.num_packer_per_sc =
1912                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1913                 }
1914
1915 parse_soc_bounding_box:
1916                 /*
1917                  * The soc bounding box info is not integrated in the discovery table,
1918                  * so it always needs to be parsed from the gpu info firmware when required.
1919                  */
1920                 if (hdr->version_minor == 2) {
1921                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1922                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1923                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1924                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1925                 }
1926                 break;
1927         }
1928         default:
1929                 dev_err(adev->dev,
1930                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1931                 err = -EINVAL;
1932                 goto out;
1933         }
1934 out:
1935         return err;
1936 }
1937
1938 /**
1939  * amdgpu_device_ip_early_init - run early init for hardware IPs
1940  *
1941  * @adev: amdgpu_device pointer
1942  *
1943  * Early initialization pass for hardware IPs.  The hardware IPs that make
1944  * up each asic are discovered and each IP's early_init callback is run.  This
1945  * is the first stage in initializing the asic.
1946  * Returns 0 on success, negative error code on failure.
1947  */
1948 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1949 {
1950         int i, r;
1951
1952         amdgpu_device_enable_virtual_display(adev);
1953
1954         if (amdgpu_sriov_vf(adev)) {
1955                 r = amdgpu_virt_request_full_gpu(adev, true);
1956                 if (r)
1957                         return r;
1958         }
1959
1960         switch (adev->asic_type) {
1961 #ifdef CONFIG_DRM_AMDGPU_SI
1962         case CHIP_VERDE:
1963         case CHIP_TAHITI:
1964         case CHIP_PITCAIRN:
1965         case CHIP_OLAND:
1966         case CHIP_HAINAN:
1967                 adev->family = AMDGPU_FAMILY_SI;
1968                 r = si_set_ip_blocks(adev);
1969                 if (r)
1970                         return r;
1971                 break;
1972 #endif
1973 #ifdef CONFIG_DRM_AMDGPU_CIK
1974         case CHIP_BONAIRE:
1975         case CHIP_HAWAII:
1976         case CHIP_KAVERI:
1977         case CHIP_KABINI:
1978         case CHIP_MULLINS:
1979                 if (adev->flags & AMD_IS_APU)
1980                         adev->family = AMDGPU_FAMILY_KV;
1981                 else
1982                         adev->family = AMDGPU_FAMILY_CI;
1983
1984                 r = cik_set_ip_blocks(adev);
1985                 if (r)
1986                         return r;
1987                 break;
1988 #endif
1989         case CHIP_TOPAZ:
1990         case CHIP_TONGA:
1991         case CHIP_FIJI:
1992         case CHIP_POLARIS10:
1993         case CHIP_POLARIS11:
1994         case CHIP_POLARIS12:
1995         case CHIP_VEGAM:
1996         case CHIP_CARRIZO:
1997         case CHIP_STONEY:
1998                 if (adev->flags & AMD_IS_APU)
1999                         adev->family = AMDGPU_FAMILY_CZ;
2000                 else
2001                         adev->family = AMDGPU_FAMILY_VI;
2002
2003                 r = vi_set_ip_blocks(adev);
2004                 if (r)
2005                         return r;
2006                 break;
2007         case CHIP_VEGA10:
2008         case CHIP_VEGA12:
2009         case CHIP_VEGA20:
2010         case CHIP_RAVEN:
2011         case CHIP_ARCTURUS:
2012         case CHIP_RENOIR:
2013                 if (adev->flags & AMD_IS_APU)
2014                         adev->family = AMDGPU_FAMILY_RV;
2015                 else
2016                         adev->family = AMDGPU_FAMILY_AI;
2017
2018                 r = soc15_set_ip_blocks(adev);
2019                 if (r)
2020                         return r;
2021                 break;
2022         case  CHIP_NAVI10:
2023         case  CHIP_NAVI14:
2024         case  CHIP_NAVI12:
2025         case  CHIP_SIENNA_CICHLID:
2026         case  CHIP_NAVY_FLOUNDER:
2027         case  CHIP_DIMGREY_CAVEFISH:
2028         case CHIP_VANGOGH:
2029                 if (adev->asic_type == CHIP_VANGOGH)
2030                         adev->family = AMDGPU_FAMILY_VGH;
2031                 else
2032                         adev->family = AMDGPU_FAMILY_NV;
2033
2034                 r = nv_set_ip_blocks(adev);
2035                 if (r)
2036                         return r;
2037                 break;
2038         default:
2039                 /* FIXME: not supported yet */
2040                 return -EINVAL;
2041         }
2042
2043         amdgpu_amdkfd_device_probe(adev);
2044
2045         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2046         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2047                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2048
2049         for (i = 0; i < adev->num_ip_blocks; i++) {
2050                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2051                         DRM_ERROR("disabled ip block: %d <%s>\n",
2052                                   i, adev->ip_blocks[i].version->funcs->name);
2053                         adev->ip_blocks[i].status.valid = false;
2054                 } else {
2055                         if (adev->ip_blocks[i].version->funcs->early_init) {
2056                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2057                                 if (r == -ENOENT) {
2058                                         adev->ip_blocks[i].status.valid = false;
2059                                 } else if (r) {
2060                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2061                                                   adev->ip_blocks[i].version->funcs->name, r);
2062                                         return r;
2063                                 } else {
2064                                         adev->ip_blocks[i].status.valid = true;
2065                                 }
2066                         } else {
2067                                 adev->ip_blocks[i].status.valid = true;
2068                         }
2069                 }
2070                 /* get the vbios after the asic_funcs are set up */
2071                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2072                         r = amdgpu_device_parse_gpu_info_fw(adev);
2073                         if (r)
2074                                 return r;
2075
2076                         /* Read BIOS */
2077                         if (!amdgpu_get_bios(adev))
2078                                 return -EINVAL;
2079
2080                         r = amdgpu_atombios_init(adev);
2081                         if (r) {
2082                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2083                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2084                                 return r;
2085                         }
2086                 }
2087         }
2088
2089         adev->cg_flags &= amdgpu_cg_mask;
2090         adev->pg_flags &= amdgpu_pg_mask;
2091
2092         return 0;
2093 }
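/*
 * Editor's note (not in the original file): the per-bit check in the
 * loop above is driven by the amdgpu.ip_block_mask module parameter;
 * bit i enables the IP block with index i, so for example
 *
 *	modprobe amdgpu ip_block_mask=0xfffffffd
 *
 * would mark block 1 invalid and skip all of its init callbacks (the
 * default of 0xffffffff keeps every block enabled).
 */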
2094
2095 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2096 {
2097         int i, r;
2098
2099         for (i = 0; i < adev->num_ip_blocks; i++) {
2100                 if (!adev->ip_blocks[i].status.sw)
2101                         continue;
2102                 if (adev->ip_blocks[i].status.hw)
2103                         continue;
2104                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2105                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2106                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2107                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2108                         if (r) {
2109                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2110                                           adev->ip_blocks[i].version->funcs->name, r);
2111                                 return r;
2112                         }
2113                         adev->ip_blocks[i].status.hw = true;
2114                 }
2115         }
2116
2117         return 0;
2118 }
2119
2120 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2121 {
2122         int i, r;
2123
2124         for (i = 0; i < adev->num_ip_blocks; i++) {
2125                 if (!adev->ip_blocks[i].status.sw)
2126                         continue;
2127                 if (adev->ip_blocks[i].status.hw)
2128                         continue;
2129                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2130                 if (r) {
2131                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2132                                   adev->ip_blocks[i].version->funcs->name, r);
2133                         return r;
2134                 }
2135                 adev->ip_blocks[i].status.hw = true;
2136         }
2137
2138         return 0;
2139 }
2140
2141 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2142 {
2143         int r = 0;
2144         int i;
2145         uint32_t smu_version;
2146
2147         if (adev->asic_type >= CHIP_VEGA10) {
2148                 for (i = 0; i < adev->num_ip_blocks; i++) {
2149                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2150                                 continue;
2151
2152                         /* no need to do the fw loading again if already done */
2153                         if (adev->ip_blocks[i].status.hw)
2154                                 break;
2155
2156                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2157                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2158                                 if (r) {
2159                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2160                                                           adev->ip_blocks[i].version->funcs->name, r);
2161                                         return r;
2162                                 }
2163                         } else {
2164                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2165                                 if (r) {
2166                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2167                                                           adev->ip_blocks[i].version->funcs->name, r);
2168                                         return r;
2169                                 }
2170                         }
2171
2172                         adev->ip_blocks[i].status.hw = true;
2173                         break;
2174                 }
2175         }
2176
2177         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2178                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2179
2180         return r;
2181 }
2182
2183 /**
2184  * amdgpu_device_ip_init - run init for hardware IPs
2185  *
2186  * @adev: amdgpu_device pointer
2187  *
2188  * Main initialization pass for hardware IPs.  The list of all the hardware
2189  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2190  * are run.  sw_init initializes the software state associated with each IP
2191  * and hw_init initializes the hardware associated with each IP.
2192  * Returns 0 on success, negative error code on failure.
2193  */
2194 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2195 {
2196         int i, r;
2197
2198         r = amdgpu_ras_init(adev);
2199         if (r)
2200                 return r;
2201
2202         for (i = 0; i < adev->num_ip_blocks; i++) {
2203                 if (!adev->ip_blocks[i].status.valid)
2204                         continue;
2205                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2206                 if (r) {
2207                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2208                                   adev->ip_blocks[i].version->funcs->name, r);
2209                         goto init_failed;
2210                 }
2211                 adev->ip_blocks[i].status.sw = true;
2212
2213                 /* need to do gmc hw init early so we can allocate gpu mem */
2214                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2215                         r = amdgpu_device_vram_scratch_init(adev);
2216                         if (r) {
2217                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2218                                 goto init_failed;
2219                         }
2220                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2221                         if (r) {
2222                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2223                                 goto init_failed;
2224                         }
2225                         r = amdgpu_device_wb_init(adev);
2226                         if (r) {
2227                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2228                                 goto init_failed;
2229                         }
2230                         adev->ip_blocks[i].status.hw = true;
2231
2232                         /* right after GMC hw init, we create CSA */
2233                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2234                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2235                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2236                                                                 AMDGPU_CSA_SIZE);
2237                                 if (r) {
2238                                         DRM_ERROR("allocate CSA failed %d\n", r);
2239                                         goto init_failed;
2240                                 }
2241                         }
2242                 }
2243         }
2244
2245         if (amdgpu_sriov_vf(adev))
2246                 amdgpu_virt_init_data_exchange(adev);
2247
2248         r = amdgpu_ib_pool_init(adev);
2249         if (r) {
2250                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2251                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2252                 goto init_failed;
2253         }
2254
2255         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2256         if (r)
2257                 goto init_failed;
2258
2259         r = amdgpu_device_ip_hw_init_phase1(adev);
2260         if (r)
2261                 goto init_failed;
2262
2263         r = amdgpu_device_fw_loading(adev);
2264         if (r)
2265                 goto init_failed;
2266
2267         r = amdgpu_device_ip_hw_init_phase2(adev);
2268         if (r)
2269                 goto init_failed;
2270
2271         /*
2272          * Retired pages will be loaded from eeprom and reserved here;
2273          * this must be called after amdgpu_device_ip_hw_init_phase2 since
2274          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2275          * functional for I2C communication, which is only true at this point.
2276          *
2277          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2278          * about failures caused by a bad gpu state and stop the amdgpu init
2279          * process accordingly.  In other failure cases it still releases all
2280          * the resources and prints an error message rather than returning a
2281          * negative value to the upper level.
2282          *
2283          * Note: theoretically, this should be called before all vram
2284          * allocations to protect retired pages from being abused.
2285          */
2286         r = amdgpu_ras_recovery_init(adev);
2287         if (r)
2288                 goto init_failed;
2289
2290         if (adev->gmc.xgmi.num_physical_nodes > 1)
2291                 amdgpu_xgmi_add_device(adev);
2292         amdgpu_amdkfd_device_init(adev);
2293
2294         amdgpu_fru_get_product_info(adev);
2295
2296 init_failed:
2297         if (amdgpu_sriov_vf(adev))
2298                 amdgpu_virt_release_full_gpu(adev, true);
2299
2300         return r;
2301 }
2302
2303 /**
2304  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2305  *
2306  * @adev: amdgpu_device pointer
2307  *
2308  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2309  * this function before a GPU reset.  If the value is retained after a
2310  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2311  */
2312 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2313 {
2314         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2315 }
2316
2317 /**
2318  * amdgpu_device_check_vram_lost - check if vram is valid
2319  *
2320  * @adev: amdgpu_device pointer
2321  *
2322  * Checks the reset magic value written to the gart pointer in VRAM.
2323  * The driver calls this after a GPU reset to see if the contents of
2324  * VRAM were lost or not.
2325  * Returns true if vram is lost, false if not.
2326  */
2327 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2328 {
2329         if (memcmp(adev->gart.ptr, adev->reset_magic,
2330                         AMDGPU_RESET_MAGIC_NUM))
2331                 return true;
2332
2333         if (!amdgpu_in_reset(adev))
2334                 return false;
2335
2336         /*
2337          * For all ASICs with baco/mode1 reset, the VRAM is
2338          * always assumed to be lost.
2339          */
2340         switch (amdgpu_asic_reset_method(adev)) {
2341         case AMD_RESET_METHOD_BACO:
2342         case AMD_RESET_METHOD_MODE1:
2343                 return true;
2344         default:
2345                 return false;
2346         }
2347 }
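/*
 * Editor's sketch (not in the original file): how the magic pair above
 * is consumed.  The magic is (re)written once the hardware is known
 * functional (see amdgpu_device_ip_late_init() below) and tested after
 * a reset; the reset path bumps vram_lost_counter so userspace contexts
 * can learn their buffers are gone.  The wrapper name is hypothetical.
 */
static void example_post_reset_vram_check(struct amdgpu_device *adev)
{
        if (amdgpu_device_check_vram_lost(adev)) {
                DRM_INFO("VRAM is lost due to GPU reset!\n");
                atomic_inc(&adev->vram_lost_counter);
        }
}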
2348
2349 /**
2350  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2351  *
2352  * @adev: amdgpu_device pointer
2353  * @state: clockgating state (gate or ungate)
2354  *
2355  * The list of all the hardware IPs that make up the asic is walked and the
2356  * set_clockgating_state callbacks are run.
2357  * The late initialization pass enables clockgating for hardware IPs;
2358  * the fini and suspend passes disable it.
2359  * Returns 0 on success, negative error code on failure.
2360  */
2361
2362 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2363                                                 enum amd_clockgating_state state)
2364 {
2365         int i, j, r;
2366
2367         if (amdgpu_emu_mode == 1)
2368                 return 0;
2369
2370         for (j = 0; j < adev->num_ip_blocks; j++) {
2371                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2372                 if (!adev->ip_blocks[i].status.late_initialized)
2373                         continue;
2374                 /* skip CG for VCE/UVD, it's handled specially */
2375                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2376                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2377                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2378                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2379                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2380                         /* enable clockgating to save power */
2381                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2382                                                                                      state);
2383                         if (r) {
2384                                 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2385                                           adev->ip_blocks[i].version->funcs->name, r);
2386                                 return r;
2387                         }
2388                 }
2389         }
2390
2391         return 0;
2392 }
2393
2394 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2395 {
2396         int i, j, r;
2397
2398         if (amdgpu_emu_mode == 1)
2399                 return 0;
2400
2401         for (j = 0; j < adev->num_ip_blocks; j++) {
2402                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2403                 if (!adev->ip_blocks[i].status.late_initialized)
2404                         continue;
2405                 /* skip PG for VCE/UVD, it's handled specially */
2406                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2407                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2408                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2409                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2410                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2411                         /* enable powergating to save power */
2412                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2413                                                                                         state);
2414                         if (r) {
2415                                 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2416                                           adev->ip_blocks[i].version->funcs->name, r);
2417                                 return r;
2418                         }
2419                 }
2420         }
2421         return 0;
2422 }
2423
2424 static int amdgpu_device_enable_mgpu_fan_boost(void)
2425 {
2426         struct amdgpu_gpu_instance *gpu_ins;
2427         struct amdgpu_device *adev;
2428         int i, ret = 0;
2429
2430         mutex_lock(&mgpu_info.mutex);
2431
2432         /*
2433          * MGPU fan boost feature should be enabled
2434          * only when there are two or more dGPUs in
2435          * the system
2436          */
2437         if (mgpu_info.num_dgpu < 2)
2438                 goto out;
2439
2440         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2441                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2442                 adev = gpu_ins->adev;
2443                 if (!(adev->flags & AMD_IS_APU) &&
2444                     !gpu_ins->mgpu_fan_enabled) {
2445                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2446                         if (ret)
2447                                 break;
2448
2449                         gpu_ins->mgpu_fan_enabled = 1;
2450                 }
2451         }
2452
2453 out:
2454         mutex_unlock(&mgpu_info.mutex);
2455
2456         return ret;
2457 }
2458
2459 /**
2460  * amdgpu_device_ip_late_init - run late init for hardware IPs
2461  *
2462  * @adev: amdgpu_device pointer
2463  *
2464  * Late initialization pass for hardware IPs.  The list of all the hardware
2465  * IPs that make up the asic is walked and the late_init callbacks are run.
2466  * late_init covers any special initialization that an IP requires
2467  * after all of them have been initialized or something that needs to happen
2468  * late in the init process.
2469  * Returns 0 on success, negative error code on failure.
2470  */
2471 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2472 {
2473         struct amdgpu_gpu_instance *gpu_instance;
2474         int i = 0, r;
2475
2476         for (i = 0; i < adev->num_ip_blocks; i++) {
2477                 if (!adev->ip_blocks[i].status.hw)
2478                         continue;
2479                 if (adev->ip_blocks[i].version->funcs->late_init) {
2480                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2481                         if (r) {
2482                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2483                                           adev->ip_blocks[i].version->funcs->name, r);
2484                                 return r;
2485                         }
2486                 }
2487                 adev->ip_blocks[i].status.late_initialized = true;
2488         }
2489
2490         amdgpu_ras_set_error_query_ready(adev, true);
2491
2492         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2493         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2494
2495         amdgpu_device_fill_reset_magic(adev);
2496
2497         r = amdgpu_device_enable_mgpu_fan_boost();
2498         if (r)
2499                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2500
2501
2502         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2503                 mutex_lock(&mgpu_info.mutex);
2504
2505                 /*
2506                  * Reset the device p-state to low, as it was booted high.
2507                  *
2508                  * This should be performed only after all devices from the
2509                  * same hive have been initialized.
2510                  *
2511                  * However, the number of devices in a hive is not known in
2512                  * advance; it is counted one by one as the devices initialize.
2513                  *
2514                  * So we wait until all XGMI interlinked devices have initialized.
2515                  * This may add some delay, as those devices may come from
2516                  * different hives, but that should be OK.
2517                  */
2518                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2519                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2520                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2521                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2522                                         continue;
2523
2524                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2525                                                 AMDGPU_XGMI_PSTATE_MIN);
2526                                 if (r) {
2527                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2528                                         break;
2529                                 }
2530                         }
2531                 }
2532
2533                 mutex_unlock(&mgpu_info.mutex);
2534         }
2535
2536         return 0;
2537 }
2538
2539 /**
2540  * amdgpu_device_ip_fini - run fini for hardware IPs
2541  *
2542  * @adev: amdgpu_device pointer
2543  *
2544  * Main teardown pass for hardware IPs.  The list of all the hardware
2545  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2546  * are run.  hw_fini tears down the hardware associated with each IP
2547  * and sw_fini tears down any software state associated with each IP.
2548  * Returns 0 on success, negative error code on failure.
2549  */
2550 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2551 {
2552         int i, r;
2553
2554         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2555                 amdgpu_virt_release_ras_err_handler_data(adev);
2556
2557         amdgpu_ras_pre_fini(adev);
2558
2559         if (adev->gmc.xgmi.num_physical_nodes > 1)
2560                 amdgpu_xgmi_remove_device(adev);
2561
2562         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2563         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2564
2565         amdgpu_amdkfd_device_fini(adev);
2566
2567         /* need to disable SMC first */
2568         for (i = 0; i < adev->num_ip_blocks; i++) {
2569                 if (!adev->ip_blocks[i].status.hw)
2570                         continue;
2571                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2572                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2573                         /* XXX handle errors */
2574                         if (r) {
2575                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2576                                           adev->ip_blocks[i].version->funcs->name, r);
2577                         }
2578                         adev->ip_blocks[i].status.hw = false;
2579                         break;
2580                 }
2581         }
2582
2583         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2584                 if (!adev->ip_blocks[i].status.hw)
2585                         continue;
2586
2587                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2588                 /* XXX handle errors */
2589                 if (r) {
2590                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2591                                   adev->ip_blocks[i].version->funcs->name, r);
2592                 }
2593
2594                 adev->ip_blocks[i].status.hw = false;
2595         }
2596
2597
2598         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2599                 if (!adev->ip_blocks[i].status.sw)
2600                         continue;
2601
2602                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2603                         amdgpu_ucode_free_bo(adev);
2604                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2605                         amdgpu_device_wb_fini(adev);
2606                         amdgpu_device_vram_scratch_fini(adev);
2607                         amdgpu_ib_pool_fini(adev);
2608                 }
2609
2610                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2611                 /* XXX handle errors */
2612                 if (r) {
2613                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2614                                   adev->ip_blocks[i].version->funcs->name, r);
2615                 }
2616                 adev->ip_blocks[i].status.sw = false;
2617                 adev->ip_blocks[i].status.valid = false;
2618         }
2619
2620         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2621                 if (!adev->ip_blocks[i].status.late_initialized)
2622                         continue;
2623                 if (adev->ip_blocks[i].version->funcs->late_fini)
2624                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2625                 adev->ip_blocks[i].status.late_initialized = false;
2626         }
2627
2628         amdgpu_ras_fini(adev);
2629
2630         if (amdgpu_sriov_vf(adev))
2631                 if (amdgpu_virt_release_full_gpu(adev, false))
2632                         DRM_ERROR("failed to release exclusive mode on fini\n");
2633
2634         return 0;
2635 }
2636
2637 /**
2638  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2639  *
2640  * @work: work_struct.
2641  */
2642 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2643 {
2644         struct amdgpu_device *adev =
2645                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2646         int r;
2647
2648         r = amdgpu_ib_ring_tests(adev);
2649         if (r)
2650                 DRM_ERROR("ib ring test failed (%d).\n", r);
2651 }
2652
2653 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2654 {
2655         struct amdgpu_device *adev =
2656                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2657
2658         mutex_lock(&adev->gfx.gfx_off_mutex);
2659         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2660                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2661                         adev->gfx.gfx_off_state = true;
2662         }
2663         mutex_unlock(&adev->gfx.gfx_off_mutex);
2664 }
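/*
 * Editor's sketch (not in the original file): the counterpart of the
 * delayed work above is amdgpu_gfx_off_ctrl() in amdgpu_gfx.c, which
 * raises and lowers gfx_off_req_count and only arms gfx_off_delay_work
 * once the count returns to zero.  A caller that must touch GFX
 * registers brackets the access like this (helper name hypothetical):
 */
static void example_touch_gfx_registers(struct amdgpu_device *adev)
{
        amdgpu_gfx_off_ctrl(adev, false);       /* keep GFX powered on */

        /* ... access GFX registers safely here ... */

        amdgpu_gfx_off_ctrl(adev, true);        /* allow GFXOFF again */
}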
2665
2666 /**
2667  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2668  *
2669  * @adev: amdgpu_device pointer
2670  *
2671  * Main suspend function for hardware IPs.  The list of all the hardware
2672  * IPs that make up the asic is walked, clockgating is disabled and the
2673  * suspend callbacks are run.  suspend puts the hardware and software state
2674  * in each IP into a state suitable for suspend.
2675  * Returns 0 on success, negative error code on failure.
2676  */
2677 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2678 {
2679         int i, r;
2680
2681         if (adev->in_poweroff_reboot_com ||
2682             !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
2683                 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2684                 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2685         }
2686
2687         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2688                 if (!adev->ip_blocks[i].status.valid)
2689                         continue;
2690
2691                 /* displays are handled separately */
2692                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2693                         continue;
2694
2695                 /* XXX handle errors */
2696                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2697                 /* XXX handle errors */
2698                 if (r) {
2699                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2700                                   adev->ip_blocks[i].version->funcs->name, r);
2701                         return r;
2702                 }
2703
2704                 adev->ip_blocks[i].status.hw = false;
2705         }
2706
2707         return 0;
2708 }
2709
2710 /**
2711  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2712  *
2713  * @adev: amdgpu_device pointer
2714  *
2715  * Main suspend function for hardware IPs.  The list of all the hardware
2716  * IPs that make up the asic is walked, clockgating is disabled and the
2717  * suspend callbacks are run.  suspend puts the hardware and software state
2718  * in each IP into a state suitable for suspend.
2719  * Returns 0 on success, negative error code on failure.
2720  */
2721 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2722 {
2723         int i, r;
2724
2725         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2726                 if (!adev->ip_blocks[i].status.valid)
2727                         continue;
2728                 /* displays are handled in phase1 */
2729                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2730                         continue;
2731                 /* PSP lost connection when err_event_athub occurs */
2732                 if (amdgpu_ras_intr_triggered() &&
2733                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2734                         adev->ip_blocks[i].status.hw = false;
2735                         continue;
2736                 }
2737                 /* XXX handle errors */
2738                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2739                 /* XXX handle errors */
2740                 if (r) {
2741                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2742                                   adev->ip_blocks[i].version->funcs->name, r);
2743                 }
2744                 adev->ip_blocks[i].status.hw = false;
2745                 /* handle putting the SMC in the appropriate state */
2746                 if (!amdgpu_sriov_vf(adev)) {
2747                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2748                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2749                                 if (r) {
2750                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2751                                                         adev->mp1_state, r);
2752                                         return r;
2753                                 }
2754                         }
2755                 }
2756                 adev->ip_blocks[i].status.hw = false;
2757         }
2758
2759         return 0;
2760 }
2761
2762 /**
2763  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2764  *
2765  * @adev: amdgpu_device pointer
2766  *
2767  * Main suspend function for hardware IPs.  The list of all the hardware
2768  * IPs that make up the asic is walked, clockgating is disabled and the
2769  * suspend callbacks are run.  suspend puts the hardware and software state
2770  * in each IP into a state suitable for suspend.
2771  * Returns 0 on success, negative error code on failure.
2772  */
2773 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2774 {
2775         int r;
2776
2777         if (amdgpu_sriov_vf(adev))
2778                 amdgpu_virt_request_full_gpu(adev, false);
2779
2780         r = amdgpu_device_ip_suspend_phase1(adev);
2781         if (r)
2782                 return r;
2783         r = amdgpu_device_ip_suspend_phase2(adev);
2784
2785         if (amdgpu_sriov_vf(adev))
2786                 amdgpu_virt_release_full_gpu(adev, false);
2787
2788         return r;
2789 }
2790
2791 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2792 {
2793         int i, r;
2794
2795         static enum amd_ip_block_type ip_order[] = {
2796                 AMD_IP_BLOCK_TYPE_GMC,
2797                 AMD_IP_BLOCK_TYPE_COMMON,
2798                 AMD_IP_BLOCK_TYPE_PSP,
2799                 AMD_IP_BLOCK_TYPE_IH,
2800         };
2801
2802         for (i = 0; i < adev->num_ip_blocks; i++) {
2803                 int j;
2804                 struct amdgpu_ip_block *block;
2805
2806                 block = &adev->ip_blocks[i];
2807                 block->status.hw = false;
2808
2809                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2810
2811                         if (block->version->type != ip_order[j] ||
2812                                 !block->status.valid)
2813                                 continue;
2814
2815                         r = block->version->funcs->hw_init(adev);
2816                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2817                         if (r)
2818                                 return r;
2819                         block->status.hw = true;
2820                 }
2821         }
2822
2823         return 0;
2824 }
2825
2826 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2827 {
2828         int i, r;
2829
2830         static enum amd_ip_block_type ip_order[] = {
2831                 AMD_IP_BLOCK_TYPE_SMC,
2832                 AMD_IP_BLOCK_TYPE_DCE,
2833                 AMD_IP_BLOCK_TYPE_GFX,
2834                 AMD_IP_BLOCK_TYPE_SDMA,
2835                 AMD_IP_BLOCK_TYPE_UVD,
2836                 AMD_IP_BLOCK_TYPE_VCE,
2837                 AMD_IP_BLOCK_TYPE_VCN
2838         };
2839
2840         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2841                 int j;
2842                 struct amdgpu_ip_block *block;
2843
2844                 for (j = 0; j < adev->num_ip_blocks; j++) {
2845                         block = &adev->ip_blocks[j];
2846
2847                         if (block->version->type != ip_order[i] ||
2848                                 !block->status.valid ||
2849                                 block->status.hw)
2850                                 continue;
2851
2852                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2853                                 r = block->version->funcs->resume(adev);
2854                         else
2855                                 r = block->version->funcs->hw_init(adev);
2856
2857                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2858                         if (r)
2859                                 return r;
2860                         block->status.hw = true;
2861                 }
2862         }
2863
2864         return 0;
2865 }
2866
2867 /**
2868  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2869  *
2870  * @adev: amdgpu_device pointer
2871  *
2872  * First resume function for hardware IPs.  The list of all the hardware
2873  * IPs that make up the asic is walked and the resume callbacks are run for
2874  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2875  * after a suspend and updates the software state as necessary.  This
2876  * function is also used for restoring the GPU after a GPU reset.
2877  * Returns 0 on success, negative error code on failure.
2878  */
2879 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2880 {
2881         int i, r;
2882
2883         for (i = 0; i < adev->num_ip_blocks; i++) {
2884                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2885                         continue;
2886                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2887                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2888                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2889
2890                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2891                         if (r) {
2892                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2893                                           adev->ip_blocks[i].version->funcs->name, r);
2894                                 return r;
2895                         }
2896                         adev->ip_blocks[i].status.hw = true;
2897                 }
2898         }
2899
2900         return 0;
2901 }
2902
2903 /**
2904  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2905  *
2906  * @adev: amdgpu_device pointer
2907  *
2908  * Second resume function for hardware IPs.  The list of all the hardware
2909  * IPs that make up the asic is walked and the resume callbacks are run for
2910  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2911  * functional state after a suspend and updates the software state as
2912  * necessary.  This function is also used for restoring the GPU after a GPU
2913  * reset.
2914  * Returns 0 on success, negative error code on failure.
2915  */
2916 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2917 {
2918         int i, r;
2919
2920         for (i = 0; i < adev->num_ip_blocks; i++) {
2921                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2922                         continue;
2923                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2924                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2925                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2926                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2927                         continue;
2928                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2929                 if (r) {
2930                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2931                                   adev->ip_blocks[i].version->funcs->name, r);
2932                         return r;
2933                 }
2934                 adev->ip_blocks[i].status.hw = true;
2935         }
2936
2937         return 0;
2938 }
2939
2940 /**
2941  * amdgpu_device_ip_resume - run resume for hardware IPs
2942  *
2943  * @adev: amdgpu_device pointer
2944  *
2945  * Main resume function for hardware IPs.  The hardware IPs
2946  * are split into two resume functions because they are
2947  * also used in recovering from a GPU reset and some additional
2948  * steps need to be taken between them.  In this case (S3/S4) they are
2949  * run sequentially.
2950  * Returns 0 on success, negative error code on failure.
2951  */
2952 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2953 {
2954         int r;
2955
2956         r = amdgpu_device_ip_resume_phase1(adev);
2957         if (r)
2958                 return r;
2959
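        /* firmware is loaded between the two phases so that the phase2
         * blocks (GFX, SDMA, etc.) resume with their microcode already
         * in place */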
2960         r = amdgpu_device_fw_loading(adev);
2961         if (r)
2962                 return r;
2963
2964         r = amdgpu_device_ip_resume_phase2(adev);
2965
2966         return r;
2967 }
2968
2969 /**
2970  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2971  *
2972  * @adev: amdgpu_device pointer
2973  *
2974  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2975  */
2976 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2977 {
2978         if (amdgpu_sriov_vf(adev)) {
2979                 if (adev->is_atom_fw) {
2980                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2981                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2982                 } else {
2983                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2984                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2985                 }
2986
2987                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2988                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2989         }
2990 }
2991
2992 /**
2993  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2994  *
2995  * @asic_type: AMD asic type
2996  *
2997  * Check if there is DC (new modesetting infrastructure) support for an asic.
2998  * Returns true if DC has support, false if not.
2999  */
3000 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3001 {
3002         switch (asic_type) {
3003 #if defined(CONFIG_DRM_AMD_DC)
3004 #if defined(CONFIG_DRM_AMD_DC_SI)
3005         case CHIP_TAHITI:
3006         case CHIP_PITCAIRN:
3007         case CHIP_VERDE:
3008         case CHIP_OLAND:
3009 #endif
3010         case CHIP_BONAIRE:
3011         case CHIP_KAVERI:
3012         case CHIP_KABINI:
3013         case CHIP_MULLINS:
3014                 /*
3015                  * We have systems in the wild with these ASICs that require
3016                  * LVDS and VGA support which is not supported with DC.
3017                  *
3018                  * Fall back to the non-DC driver here by default so as not to
3019                  * cause regressions.
3020                  */
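                /* amdgpu_dc > 0 means DC was explicitly requested,
                 * e.g. via amdgpu.dc=1 on the kernel command line */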
3021                 return amdgpu_dc > 0;
3022         case CHIP_HAWAII:
3023         case CHIP_CARRIZO:
3024         case CHIP_STONEY:
3025         case CHIP_POLARIS10:
3026         case CHIP_POLARIS11:
3027         case CHIP_POLARIS12:
3028         case CHIP_VEGAM:
3029         case CHIP_TONGA:
3030         case CHIP_FIJI:
3031         case CHIP_VEGA10:
3032         case CHIP_VEGA12:
3033         case CHIP_VEGA20:
3034 #if defined(CONFIG_DRM_AMD_DC_DCN)
3035         case CHIP_RAVEN:
3036         case CHIP_NAVI10:
3037         case CHIP_NAVI14:
3038         case CHIP_NAVI12:
3039         case CHIP_RENOIR:
3040         case CHIP_SIENNA_CICHLID:
3041         case CHIP_NAVY_FLOUNDER:
3042         case CHIP_DIMGREY_CAVEFISH:
3043         case CHIP_VANGOGH:
3044 #endif
3045                 return amdgpu_dc != 0;
3046 #endif
3047         default:
3048                 if (amdgpu_dc > 0)
3049                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3050                                          "but isn't supported by ASIC, ignoring\n");
3051                 return false;
3052         }
3053 }
3054
3055 /**
3056  * amdgpu_device_has_dc_support - check if dc is supported
3057  *
3058  * @adev: amdgpu_device pointer
3059  *
3060  * Returns true for supported, false for not supported
3061  */
3062 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3063 {
3064         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3065                 return false;
3066
3067         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3068 }
3069
3071 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3072 {
3073         struct amdgpu_device *adev =
3074                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3075         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3076
3077         /* It's a bug to not have a hive within this function */
3078         if (WARN_ON(!hive))
3079                 return;
3080
3081         /*
3082          * Use task barrier to synchronize all xgmi reset works across the
3083          * hive. task_barrier_enter and task_barrier_exit will block
3084          * until all the threads running the xgmi reset works reach
3085          * those points. task_barrier_full will do both blocks.
3086          */
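        /*
         * In the BACO path below, the enter barrier lines all hive
         * nodes up before BACO entry and the exit barrier lines them
         * up again before BACO exit, so the whole hive powers down
         * and back up in lockstep.
         */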
3087         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3088
3089                 task_barrier_enter(&hive->tb);
3090                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3091
3092                 if (adev->asic_reset_res)
3093                         goto fail;
3094
3095                 task_barrier_exit(&hive->tb);
3096                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3097
3098                 if (adev->asic_reset_res)
3099                         goto fail;
3100
3101                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3102                         adev->mmhub.funcs->reset_ras_error_count(adev);
3103         } else {
3104
3105                 task_barrier_full(&hive->tb);
3106                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3107         }
3108
3109 fail:
3110         if (adev->asic_reset_res)
3111                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3112                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3113         amdgpu_put_xgmi_hive(hive);
3114 }
3115
3116 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3117 {
3118         char *input = amdgpu_lockup_timeout;
3119         char *timeout_setting = NULL;
3120         int index = 0;
3121         long timeout;
3122         int ret = 0;
3123
3124         /*
3125          * By default the timeout for non-compute jobs is 10000 ms
3126          * and no timeout is enforced on compute jobs.  In SR-IOV or
3127          * passthrough mode, the timeout for compute jobs is 60000 ms
3128          * by default.
3129          */
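        /*
         * The lockup_timeout module parameter takes up to four
         * comma-separated values applied in the order gfx, compute,
         * sdma, video, e.g. amdgpu.lockup_timeout=10000,60000,10000,10000.
         * As parsed below, 0 keeps the default for that queue type and
         * a negative value disables the timeout (MAX_SCHEDULE_TIMEOUT).
         */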
3130         adev->gfx_timeout = msecs_to_jiffies(10000);
3131         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3132         if (amdgpu_sriov_vf(adev))
3133                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3134                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3135         else if (amdgpu_passthrough(adev))
3136                 adev->compute_timeout = msecs_to_jiffies(60000);
3137         else
3138                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
3139
3140         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3141                 while ((timeout_setting = strsep(&input, ",")) &&
3142                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3143                         ret = kstrtol(timeout_setting, 0, &timeout);
3144                         if (ret)
3145                                 return ret;
3146
3147                         if (timeout == 0) {
3148                                 index++;
3149                                 continue;
3150                         } else if (timeout < 0) {
3151                                 timeout = MAX_SCHEDULE_TIMEOUT;
3152                         } else {
3153                                 timeout = msecs_to_jiffies(timeout);
3154                         }
3155
3156                         switch (index++) {
3157                         case 0:
3158                                 adev->gfx_timeout = timeout;
3159                                 break;
3160                         case 1:
3161                                 adev->compute_timeout = timeout;
3162                                 break;
3163                         case 2:
3164                                 adev->sdma_timeout = timeout;
3165                                 break;
3166                         case 3:
3167                                 adev->video_timeout = timeout;
3168                                 break;
3169                         default:
3170                                 break;
3171                         }
3172                 }
3173                 /*
3174                  * There is only one value specified and
3175                  * it should apply to all non-compute jobs.
3176                  */
3177                 if (index == 1) {
3178                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3179                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3180                                 adev->compute_timeout = adev->gfx_timeout;
3181                 }
3182         }
3183
3184         return ret;
3185 }
3186
3187 static const struct attribute *amdgpu_dev_attributes[] = {
3188         &dev_attr_product_name.attr,
3189         &dev_attr_product_number.attr,
3190         &dev_attr_serial_number.attr,
3191         &dev_attr_pcie_replay_count.attr,
3192         NULL
3193 };
3194
3196 /**
3197  * amdgpu_device_init - initialize the driver
3198  *
3199  * @adev: amdgpu_device pointer
3200  * @flags: driver flags
3201  *
3202  * Initializes the driver info and hw (all asics).
3203  * Returns 0 for success or an error on failure.
3204  * Called at driver startup.
3205  */
3206 int amdgpu_device_init(struct amdgpu_device *adev,
3207                        uint32_t flags)
3208 {
3209         struct drm_device *ddev = adev_to_drm(adev);
3210         struct pci_dev *pdev = adev->pdev;
3211         int r, i;
3212         bool atpx = false;
3213         u32 max_MBps;
3214
3215         adev->shutdown = false;
3216         adev->flags = flags;
3217
3218         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3219                 adev->asic_type = amdgpu_force_asic_type;
3220         else
3221                 adev->asic_type = flags & AMD_ASIC_MASK;
3222
3223         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3224         if (amdgpu_emu_mode == 1)
3225                 adev->usec_timeout *= 10;
3226         adev->gmc.gart_size = 512 * 1024 * 1024;
3227         adev->accel_working = false;
3228         adev->num_rings = 0;
3229         adev->mman.buffer_funcs = NULL;
3230         adev->mman.buffer_funcs_ring = NULL;
3231         adev->vm_manager.vm_pte_funcs = NULL;
3232         adev->vm_manager.vm_pte_num_scheds = 0;
3233         adev->gmc.gmc_funcs = NULL;
3234         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3235         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3236
3237         adev->smc_rreg = &amdgpu_invalid_rreg;
3238         adev->smc_wreg = &amdgpu_invalid_wreg;
3239         adev->pcie_rreg = &amdgpu_invalid_rreg;
3240         adev->pcie_wreg = &amdgpu_invalid_wreg;
3241         adev->pciep_rreg = &amdgpu_invalid_rreg;
3242         adev->pciep_wreg = &amdgpu_invalid_wreg;
3243         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3244         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3245         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3246         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3247         adev->didt_rreg = &amdgpu_invalid_rreg;
3248         adev->didt_wreg = &amdgpu_invalid_wreg;
3249         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3250         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3251         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3252         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3253
3254         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3255                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3256                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3257
3258         /* mutex initialization is all done here so we
3259          * can re-call functions without having locking issues */
3260         atomic_set(&adev->irq.ih.lock, 0);
3261         mutex_init(&adev->firmware.mutex);
3262         mutex_init(&adev->pm.mutex);
3263         mutex_init(&adev->gfx.gpu_clock_mutex);
3264         mutex_init(&adev->srbm_mutex);
3265         mutex_init(&adev->gfx.pipe_reserve_mutex);
3266         mutex_init(&adev->gfx.gfx_off_mutex);
3267         mutex_init(&adev->grbm_idx_mutex);
3268         mutex_init(&adev->mn_lock);
3269         mutex_init(&adev->virt.vf_errors.lock);
3270         hash_init(adev->mn_hash);
3271         atomic_set(&adev->in_gpu_reset, 0);
3272         init_rwsem(&adev->reset_sem);
3273         mutex_init(&adev->psp.mutex);
3274         mutex_init(&adev->notifier_lock);
3275
3276         r = amdgpu_device_check_arguments(adev);
3277         if (r)
3278                 return r;
3279
3280         spin_lock_init(&adev->mmio_idx_lock);
3281         spin_lock_init(&adev->smc_idx_lock);
3282         spin_lock_init(&adev->pcie_idx_lock);
3283         spin_lock_init(&adev->uvd_ctx_idx_lock);
3284         spin_lock_init(&adev->didt_idx_lock);
3285         spin_lock_init(&adev->gc_cac_idx_lock);
3286         spin_lock_init(&adev->se_cac_idx_lock);
3287         spin_lock_init(&adev->audio_endpt_idx_lock);
3288         spin_lock_init(&adev->mm_stats.lock);
3289
3290         INIT_LIST_HEAD(&adev->shadow_list);
3291         mutex_init(&adev->shadow_list_lock);
3292
3293         INIT_DELAYED_WORK(&adev->delayed_init_work,
3294                           amdgpu_device_delayed_init_work_handler);
3295         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3296                           amdgpu_device_delay_enable_gfx_off);
3297
3298         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3299
3300         adev->gfx.gfx_off_req_count = 1;
3301         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3302
3303         atomic_set(&adev->throttling_logging_enabled, 1);
3304         /*
3305          * If throttling continues, logging will be performed every minute
3306          * to avoid log flooding. "-1" is subtracted since the thermal
3307          * throttling interrupt comes every second. Thus, the total logging
3308          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3309          * for throttling interrupt) = 60 seconds.
3310          */
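        /* the interval argument is in jiffies, with a burst of one message */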
3311         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3312         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3313
3314         /* Registers mapping */
3315         /* TODO: block userspace mapping of io register */
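        /* the MMIO register BAR is BAR 5 on BONAIRE (CIK) and newer
         * ASICs, and BAR 2 on the older SI parts */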
3316         if (adev->asic_type >= CHIP_BONAIRE) {
3317                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3318                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3319         } else {
3320                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3321                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3322         }
3323
3324         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3325         if (adev->rmmio == NULL) {
3326                 return -ENOMEM;
3327         }
3328         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3329         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3330
3331         /* io port mapping */
3332         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3333                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3334                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3335                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3336                         break;
3337                 }
3338         }
3339         if (adev->rio_mem == NULL)
3340                 DRM_INFO("PCI I/O BAR not found.\n");
3341
3342         /* enable PCIE atomic ops */
3343         r = pci_enable_atomic_ops_to_root(adev->pdev,
3344                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3345                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3346         if (r) {
3347                 adev->have_atomics_support = false;
3348                 DRM_INFO("PCIe atomic ops are not supported\n");
3349         } else {
3350                 adev->have_atomics_support = true;
3351         }
3352
3353         amdgpu_device_get_pcie_info(adev);
3354
3355         if (amdgpu_mcbp)
3356                 DRM_INFO("MCBP is enabled\n");
3357
3358         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3359                 adev->enable_mes = true;
3360
3361         /* detect hw virtualization here */
3362         amdgpu_detect_virtualization(adev);
3363
3364         r = amdgpu_device_get_job_timeout_settings(adev);
3365         if (r) {
3366                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3367                 goto failed_unmap;
3368         }
3369
3370         /* early init functions */
3371         r = amdgpu_device_ip_early_init(adev);
3372         if (r)
3373                 goto failed_unmap;
3374
3375         /* doorbell bar mapping and doorbell index init */
3376         amdgpu_device_doorbell_init(adev);
3377
3378         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3379         /* this will fail for cards that aren't VGA class devices, just
3380          * ignore it */
3381         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3382                 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3383
3384         if (amdgpu_device_supports_atpx(ddev))
3385                 atpx = true;
3386         if (amdgpu_has_atpx() &&
3387             (amdgpu_is_atpx_hybrid() ||
3388              amdgpu_has_atpx_dgpu_power_cntl()) &&
3389             !pci_is_thunderbolt_attached(adev->pdev))
3390                 vga_switcheroo_register_client(adev->pdev,
3391                                                &amdgpu_switcheroo_ops, atpx);
3392         if (atpx)
3393                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3394
3395         if (amdgpu_emu_mode == 1) {
3396                 /* post the asic on emulation mode */
3397                 emu_soc_asic_init(adev);
3398                 goto fence_driver_init;
3399         }
3400
3401         /* detect if we are running with an SR-IOV vBIOS */
3402         amdgpu_device_detect_sriov_bios(adev);
3403
3404         /* check if we need to reset the asic
3405          *  E.g., driver was not cleanly unloaded previously, etc.
3406          */
3407         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3408                 r = amdgpu_asic_reset(adev);
3409                 if (r) {
3410                         dev_err(adev->dev, "asic reset on init failed\n");
3411                         goto failed;
3412                 }
3413         }
3414
3415         pci_enable_pcie_error_reporting(adev->pdev);
3416
3417         /* Post card if necessary */
3418         if (amdgpu_device_need_post(adev)) {
3419                 if (!adev->bios) {
3420                         dev_err(adev->dev, "no vBIOS found\n");
3421                         r = -EINVAL;
3422                         goto failed;
3423                 }
3424                 DRM_INFO("GPU posting now...\n");
3425                 r = amdgpu_device_asic_init(adev);
3426                 if (r) {
3427                         dev_err(adev->dev, "gpu post error!\n");
3428                         goto failed;
3429                 }
3430         }
3431
3432         if (adev->is_atom_fw) {
3433                 /* Initialize clocks */
3434                 r = amdgpu_atomfirmware_get_clock_info(adev);
3435                 if (r) {
3436                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3437                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3438                         goto failed;
3439                 }
3440         } else {
3441                 /* Initialize clocks */
3442                 r = amdgpu_atombios_get_clock_info(adev);
3443                 if (r) {
3444                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3445                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3446                         goto failed;
3447                 }
3448                 /* init i2c buses */
3449                 if (!amdgpu_device_has_dc_support(adev))
3450                         amdgpu_atombios_i2c_init(adev);
3451         }
3452
3453 fence_driver_init:
3454         /* Fence driver */
3455         r = amdgpu_fence_driver_init(adev);
3456         if (r) {
3457                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3458                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3459                 goto failed;
3460         }
3461
3462         /* init the mode config */
3463         drm_mode_config_init(adev_to_drm(adev));
3464
3465         r = amdgpu_device_ip_init(adev);
3466         if (r) {
3467                 /* failed in exclusive mode due to timeout */
3468                 if (amdgpu_sriov_vf(adev) &&
3469                     !amdgpu_sriov_runtime(adev) &&
3470                     amdgpu_virt_mmio_blocked(adev) &&
3471                     !amdgpu_virt_wait_reset(adev)) {
3472                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3473                         /* Don't send request since VF is inactive. */
3474                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3475                         adev->virt.ops = NULL;
3476                         r = -EAGAIN;
3477                         goto failed;
3478                 }
3479                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3480                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3481                 goto failed;
3482         }
3483
3484         dev_info(adev->dev,
3485                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3486                         adev->gfx.config.max_shader_engines,
3487                         adev->gfx.config.max_sh_per_se,
3488                         adev->gfx.config.max_cu_per_sh,
3489                         adev->gfx.cu_info.number);
3490
3491         adev->accel_working = true;
3492
3493         amdgpu_vm_check_compute_bug(adev);
3494
3495         /* Initialize the buffer migration limit. */
3496         if (amdgpu_moverate >= 0)
3497                 max_MBps = amdgpu_moverate;
3498         else
3499                 max_MBps = 8; /* Allow 8 MB/s. */
3500         /* Get a log2 for easy divisions. */
3501         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3502
3503         amdgpu_fbdev_init(adev);
3504
3505         r = amdgpu_pm_sysfs_init(adev);
3506         if (r) {
3507                 adev->pm_sysfs_en = false;
3508                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3509         } else
3510                 adev->pm_sysfs_en = true;
3511
3512         r = amdgpu_ucode_sysfs_init(adev);
3513         if (r) {
3514                 adev->ucode_sysfs_en = false;
3515                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3516         } else
3517                 adev->ucode_sysfs_en = true;
3518
3519         if (amdgpu_testing & 1) {
3520                 if (adev->accel_working)
3521                         amdgpu_test_moves(adev);
3522                 else
3523                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3524         }
3525         if (amdgpu_benchmarking) {
3526                 if (adev->accel_working)
3527                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3528                 else
3529                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3530         }
3531
3532         /*
3533          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3534          * Otherwise the mgpu fan boost feature will be skipped because
3535          * the gpu instance count will be too low.
3536          */
3537         amdgpu_register_gpu_instance(adev);
3538
3539         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3540          * explicit gating rather than handling it automatically.
3541          */
3542         r = amdgpu_device_ip_late_init(adev);
3543         if (r) {
3544                 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3545                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3546                 goto failed;
3547         }
3548
3549         /* must succeed. */
3550         amdgpu_ras_resume(adev);
3551
3552         queue_delayed_work(system_wq, &adev->delayed_init_work,
3553                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3554
3555         if (amdgpu_sriov_vf(adev))
3556                 flush_delayed_work(&adev->delayed_init_work);
3557
3558         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3559         if (r)
3560                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3561
3562         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3563                 if (amdgpu_pmu_init(adev))
3564                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3565         }
3566
3567         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3568         if (amdgpu_device_cache_pci_state(adev->pdev))
3569                 pci_restore_state(pdev);
3570
3571         return 0;
3572
3573 failed:
3574         amdgpu_vf_error_trans_all(adev);
3575         if (atpx)
3576                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3577
3578 failed_unmap:
3579         iounmap(adev->rmmio);
3580         adev->rmmio = NULL;
3581
3582         return r;
3583 }
3584
3585 /**
3586  * amdgpu_device_fini - tear down the driver
3587  *
3588  * @adev: amdgpu_device pointer
3589  *
3590  * Tear down the driver info (all asics).
3591  * Called at driver shutdown.
3592  */
3593 void amdgpu_device_fini(struct amdgpu_device *adev)
3594 {
3595         dev_info(adev->dev, "amdgpu: finishing device.\n");
3596         flush_delayed_work(&adev->delayed_init_work);
3597         adev->shutdown = true;
3598
3599         kfree(adev->pci_state);
3600
3601         /* make sure IB tests are finished before entering exclusive mode
3602          * to avoid preemption during the IB tests
3603          */
3604         if (amdgpu_sriov_vf(adev)) {
3605                 amdgpu_virt_request_full_gpu(adev, false);
3606                 amdgpu_virt_fini_data_exchange(adev);
3607         }
3608
3609         /* disable all interrupts */
3610         amdgpu_irq_disable_all(adev);
3611         if (adev->mode_info.mode_config_initialized) {
3612                 if (!amdgpu_device_has_dc_support(adev))
3613                         drm_helper_force_disable_all(adev_to_drm(adev));
3614                 else
3615                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3616         }
3617         amdgpu_fence_driver_fini(adev);
3618         if (adev->pm_sysfs_en)
3619                 amdgpu_pm_sysfs_fini(adev);
3620         amdgpu_fbdev_fini(adev);
3621         amdgpu_device_ip_fini(adev);
3622         release_firmware(adev->firmware.gpu_info_fw);
3623         adev->firmware.gpu_info_fw = NULL;
3624         adev->accel_working = false;
3625         /* free i2c buses */
3626         if (!amdgpu_device_has_dc_support(adev))
3627                 amdgpu_i2c_fini(adev);
3628
3629         if (amdgpu_emu_mode != 1)
3630                 amdgpu_atombios_fini(adev);
3631
3632         kfree(adev->bios);
3633         adev->bios = NULL;
3634         if (amdgpu_has_atpx() &&
3635             (amdgpu_is_atpx_hybrid() ||
3636              amdgpu_has_atpx_dgpu_power_cntl()) &&
3637             !pci_is_thunderbolt_attached(adev->pdev))
3638                 vga_switcheroo_unregister_client(adev->pdev);
3639         if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
3640                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3641         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3642                 vga_client_register(adev->pdev, NULL, NULL, NULL);
3643         if (adev->rio_mem)
3644                 pci_iounmap(adev->pdev, adev->rio_mem);
3645         adev->rio_mem = NULL;
3646         iounmap(adev->rmmio);
3647         adev->rmmio = NULL;
3648         amdgpu_device_doorbell_fini(adev);
3649
3650         if (adev->ucode_sysfs_en)
3651                 amdgpu_ucode_sysfs_fini(adev);
3652
3653         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3654         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3655                 amdgpu_pmu_fini(adev);
3656         if (adev->mman.discovery_bin)
3657                 amdgpu_discovery_fini(adev);
3658 }
3659
3661 /*
3662  * Suspend & resume.
3663  */
3664 /**
3665  * amdgpu_device_suspend - initiate device suspend
3666  *
3667  * @dev: drm dev pointer
3668  * @fbcon: notify the fbdev of suspend
3669  *
3670  * Puts the hw in the suspend state (all asics).
3671  * Returns 0 for success or an error on failure.
3672  * Called at driver suspend.
3673  */
3674 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3675 {
3676         struct amdgpu_device *adev;
3677         struct drm_crtc *crtc;
3678         struct drm_connector *connector;
3679         struct drm_connector_list_iter iter;
3680         int r;
3681
3682         adev = drm_to_adev(dev);
3683
3684         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3685                 return 0;
3686
3687         adev->in_suspend = true;
3688         drm_kms_helper_poll_disable(dev);
3689
3690         if (fbcon)
3691                 amdgpu_fbdev_set_suspend(adev, 1);
3692
3693         cancel_delayed_work_sync(&adev->delayed_init_work);
3694
3695         if (!amdgpu_device_has_dc_support(adev)) {
3696                 /* turn off display hw */
3697                 drm_modeset_lock_all(dev);
3698                 drm_connector_list_iter_begin(dev, &iter);
3699                 drm_for_each_connector_iter(connector, &iter)
3700                         drm_helper_connector_dpms(connector,
3701                                                   DRM_MODE_DPMS_OFF);
3702                 drm_connector_list_iter_end(&iter);
3703                 drm_modeset_unlock_all(dev);
3704                 /* unpin the front buffers and cursors */
3705                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3706                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3707                         struct drm_framebuffer *fb = crtc->primary->fb;
3708                         struct amdgpu_bo *robj;
3709
3710                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3711                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3712                                 r = amdgpu_bo_reserve(aobj, true);
3713                                 if (r == 0) {
3714                                         amdgpu_bo_unpin(aobj);
3715                                         amdgpu_bo_unreserve(aobj);
3716                                 }
3717                         }
3718
3719                         if (fb == NULL || fb->obj[0] == NULL) {
3720                                 continue;
3721                         }
3722                         robj = gem_to_amdgpu_bo(fb->obj[0]);
3723                         /* don't unpin kernel fb objects */
3724                         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3725                                 r = amdgpu_bo_reserve(robj, true);
3726                                 if (r == 0) {
3727                                         amdgpu_bo_unpin(robj);
3728                                         amdgpu_bo_unreserve(robj);
3729                                 }
3730                         }
3731                 }
3732         }
3733
3734         amdgpu_ras_suspend(adev);
3735
3736         r = amdgpu_device_ip_suspend_phase1(adev);
3737
3738         amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3739
3740         /* evict vram memory */
3741         amdgpu_bo_evict_vram(adev);
3742
3743         amdgpu_fence_driver_suspend(adev);
3744
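        /* on an S0ix-capable system a regular suspend skips the full
         * phase2 IP suspend and only signals D3 entry to the SMU; the
         * full path is kept for poweroff/reboot and for GPU reset */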
3745         if (adev->in_poweroff_reboot_com ||
3746             !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
3747                 r = amdgpu_device_ip_suspend_phase2(adev);
3748         else
3749                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
3750         /* evict remaining vram memory
3751          * This second call to evict vram is to evict the gart page table
3752          * using the CPU.
3753          */
3754         amdgpu_bo_evict_vram(adev);
3755
3756         return 0;
3757 }
3758
3759 /**
3760  * amdgpu_device_resume - initiate device resume
3761  *
3762  * @dev: drm dev pointer
3763  * @fbcon: notify the fbdev of resume
3764  *
3765  * Bring the hw back to operating state (all asics).
3766  * Returns 0 for success or an error on failure.
3767  * Called at driver resume.
3768  */
3769 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3770 {
3771         struct drm_connector *connector;
3772         struct drm_connector_list_iter iter;
3773         struct amdgpu_device *adev = drm_to_adev(dev);
3774         struct drm_crtc *crtc;
3775         int r = 0;
3776
3777         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3778                 return 0;
3779
3780         if (amdgpu_acpi_is_s0ix_supported(adev))
3781                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3782
3783         /* post card */
3784         if (amdgpu_device_need_post(adev)) {
3785                 r = amdgpu_device_asic_init(adev);
3786                 if (r)
3787                         dev_err(adev->dev, "amdgpu asic init failed\n");
3788         }
3789
3790         r = amdgpu_device_ip_resume(adev);
3791         if (r) {
3792                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3793                 return r;
3794         }
3795         amdgpu_fence_driver_resume(adev);
3796
3798         r = amdgpu_device_ip_late_init(adev);
3799         if (r)
3800                 return r;
3801
3802         queue_delayed_work(system_wq, &adev->delayed_init_work,
3803                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3804
3805         if (!amdgpu_device_has_dc_support(adev)) {
3806                 /* pin cursors */
3807                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3808                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3809
3810                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3811                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3812                                 r = amdgpu_bo_reserve(aobj, true);
3813                                 if (r == 0) {
3814                                         r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3815                                         if (r != 0)
3816                                                 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3817                                         amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3818                                         amdgpu_bo_unreserve(aobj);
3819                                 }
3820                         }
3821                 }
3822         }
3823         r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3824         if (r)
3825                 return r;
3826
3827         /* Make sure IB tests are flushed */
3828         flush_delayed_work(&adev->delayed_init_work);
3829
3830         /* blat the mode back in */
3831         if (fbcon) {
3832                 if (!amdgpu_device_has_dc_support(adev)) {
3833                         /* pre DCE11 */
3834                         drm_helper_resume_force_mode(dev);
3835
3836                         /* turn on display hw */
3837                         drm_modeset_lock_all(dev);
3838
3839                         drm_connector_list_iter_begin(dev, &iter);
3840                         drm_for_each_connector_iter(connector, &iter)
3841                                 drm_helper_connector_dpms(connector,
3842                                                           DRM_MODE_DPMS_ON);
3843                         drm_connector_list_iter_end(&iter);
3844
3845                         drm_modeset_unlock_all(dev);
3846                 }
3847                 amdgpu_fbdev_set_suspend(adev, 0);
3848         }
3849
3850         drm_kms_helper_poll_enable(dev);
3851
3852         amdgpu_ras_resume(adev);
3853
3854         /*
3855          * Most of the connector probing functions try to acquire runtime pm
3856          * refs to ensure that the GPU is powered on when connector polling is
3857          * performed. Since we're calling this from a runtime PM callback,
3858          * trying to acquire rpm refs will cause us to deadlock.
3859          *
3860          * Since we're guaranteed to be holding the rpm lock, it's safe to
3861          * temporarily disable the rpm helpers so this doesn't deadlock us.
3862          */
3863 #ifdef CONFIG_PM
3864         dev->dev->power.disable_depth++;
3865 #endif
3866         if (!amdgpu_device_has_dc_support(adev))
3867                 drm_helper_hpd_irq_event(dev);
3868         else
3869                 drm_kms_helper_hotplug_event(dev);
3870 #ifdef CONFIG_PM
3871         dev->dev->power.disable_depth--;
3872 #endif
3873         adev->in_suspend = false;
3874
3875         return 0;
3876 }
3877
3878 /**
3879  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3880  *
3881  * @adev: amdgpu_device pointer
3882  *
3883  * The list of all the hardware IPs that make up the asic is walked and
3884  * the check_soft_reset callbacks are run.  check_soft_reset determines
3885  * if the asic is still hung or not.
3886  * Returns true if any of the IPs are still in a hung state, false if not.
3887  */
3888 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3889 {
3890         int i;
3891         bool asic_hang = false;
3892
3893         if (amdgpu_sriov_vf(adev))
3894                 return true;
3895
3896         if (amdgpu_asic_need_full_reset(adev))
3897                 return true;
3898
3899         for (i = 0; i < adev->num_ip_blocks; i++) {
3900                 if (!adev->ip_blocks[i].status.valid)
3901                         continue;
3902                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3903                         adev->ip_blocks[i].status.hang =
3904                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3905                 if (adev->ip_blocks[i].status.hang) {
3906                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3907                         asic_hang = true;
3908                 }
3909         }
3910         return asic_hang;
3911 }
3912
3913 /**
3914  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3915  *
3916  * @adev: amdgpu_device pointer
3917  *
3918  * The list of all the hardware IPs that make up the asic is walked and the
3919  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3920  * handles any IP specific hardware or software state changes that are
3921  * necessary for a soft reset to succeed.
3922  * Returns 0 on success, negative error code on failure.
3923  */
3924 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3925 {
3926         int i, r = 0;
3927
3928         for (i = 0; i < adev->num_ip_blocks; i++) {
3929                 if (!adev->ip_blocks[i].status.valid)
3930                         continue;
3931                 if (adev->ip_blocks[i].status.hang &&
3932                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3933                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3934                         if (r)
3935                                 return r;
3936                 }
3937         }
3938
3939         return 0;
3940 }
3941
3942 /**
3943  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3944  *
3945  * @adev: amdgpu_device pointer
3946  *
3947  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3948  * reset is necessary to recover.
3949  * Returns true if a full asic reset is required, false if not.
3950  */
3951 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3952 {
3953         int i;
3954
3955         if (amdgpu_asic_need_full_reset(adev))
3956                 return true;
3957
3958         for (i = 0; i < adev->num_ip_blocks; i++) {
3959                 if (!adev->ip_blocks[i].status.valid)
3960                         continue;
3961                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3962                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3963                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3964                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3965                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3966                         if (adev->ip_blocks[i].status.hang) {
3967                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
3968                                 return true;
3969                         }
3970                 }
3971         }
3972         return false;
3973 }
3974
3975 /**
3976  * amdgpu_device_ip_soft_reset - do a soft reset
3977  *
3978  * @adev: amdgpu_device pointer
3979  *
3980  * The list of all the hardware IPs that make up the asic is walked and the
3981  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3982  * IP specific hardware or software state changes that are necessary to soft
3983  * reset the IP.
3984  * Returns 0 on success, negative error code on failure.
3985  */
3986 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3987 {
3988         int i, r = 0;
3989
3990         for (i = 0; i < adev->num_ip_blocks; i++) {
3991                 if (!adev->ip_blocks[i].status.valid)
3992                         continue;
3993                 if (adev->ip_blocks[i].status.hang &&
3994                     adev->ip_blocks[i].version->funcs->soft_reset) {
3995                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3996                         if (r)
3997                                 return r;
3998                 }
3999         }
4000
4001         return 0;
4002 }
4003
4004 /**
4005  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4006  *
4007  * @adev: amdgpu_device pointer
4008  *
4009  * The list of all the hardware IPs that make up the asic is walked and the
4010  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4011  * handles any IP specific hardware or software state changes that are
4012  * necessary after the IP has been soft reset.
4013  * Returns 0 on success, negative error code on failure.
4014  */
4015 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4016 {
4017         int i, r = 0;
4018
4019         for (i = 0; i < adev->num_ip_blocks; i++) {
4020                 if (!adev->ip_blocks[i].status.valid)
4021                         continue;
4022                 if (adev->ip_blocks[i].status.hang &&
4023                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4024                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4025                 if (r)
4026                         return r;
4027         }
4028
4029         return 0;
4030 }
4031
4032 /**
4033  * amdgpu_device_recover_vram - Recover some VRAM contents
4034  *
4035  * @adev: amdgpu_device pointer
4036  *
4037  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4038  * restore things like GPUVM page tables after a GPU reset where
4039  * the contents of VRAM might be lost.
4040  *
4041  * Returns:
4042  * 0 on success, negative error code on failure.
4043  */
4044 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4045 {
4046         struct dma_fence *fence = NULL, *next = NULL;
4047         struct amdgpu_bo *shadow;
4048         long r = 1, tmo;
4049
4050         if (amdgpu_sriov_runtime(adev))
4051                 tmo = msecs_to_jiffies(8000);
4052         else
4053                 tmo = msecs_to_jiffies(100);
4054
4055         dev_info(adev->dev, "recover vram bo from shadow start\n");
4056         mutex_lock(&adev->shadow_list_lock);
4057         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4058
4059                 /* No need to recover an evicted BO */
4060                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4061                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4062                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4063                         continue;
4064
4065                 r = amdgpu_bo_restore_shadow(shadow, &next);
4066                 if (r)
4067                         break;
4068
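                /*
                 * Pipeline the restores: the copy for this BO was kicked
                 * off above before waiting on the previous BO's fence, so
                 * the wait overlaps with the outstanding copy.
                 */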
4069                 if (fence) {
4070                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4071                         dma_fence_put(fence);
4072                         fence = next;
4073                         if (tmo == 0) {
4074                                 r = -ETIMEDOUT;
4075                                 break;
4076                         } else if (tmo < 0) {
4077                                 r = tmo;
4078                                 break;
4079                         }
4080                 } else {
4081                         fence = next;
4082                 }
4083         }
4084         mutex_unlock(&adev->shadow_list_lock);
4085
4086         if (fence)
4087                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4088         dma_fence_put(fence);
4089
4090         if (r < 0 || tmo <= 0) {
4091                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4092                 return -EIO;
4093         }
4094
4095         dev_info(adev->dev, "recover vram bo from shadow done\n");
4096         return 0;
4097 }
4098
4100 /**
4101  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4102  *
4103  * @adev: amdgpu_device pointer
4104  * @from_hypervisor: request from hypervisor
4105  *
4106  * Do a VF FLR and reinitialize the ASIC.
4107  * Returns 0 on success, negative error code on failure.
4108  */
4109 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4110                                      bool from_hypervisor)
4111 {
4112         int r;
4113
4114         if (from_hypervisor)
4115                 r = amdgpu_virt_request_full_gpu(adev, true);
4116         else
4117                 r = amdgpu_virt_reset_gpu(adev);
4118         if (r)
4119                 return r;
4120
4121         amdgpu_amdkfd_pre_reset(adev);
4122
4123         /* Resume IP prior to SMC */
4124         r = amdgpu_device_ip_reinit_early_sriov(adev);
4125         if (r)
4126                 goto error;
4127
4128         amdgpu_virt_init_data_exchange(adev);
4129         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4130         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4131
4132         r = amdgpu_device_fw_loading(adev);
4133         if (r)
4134                 goto error;
4135
4136         /* now we are okay to resume SMC/CP/SDMA */
4137         r = amdgpu_device_ip_reinit_late_sriov(adev);
4138         if (r)
4139                 goto error;
4140
4141         amdgpu_irq_gpu_reset_resume_helper(adev);
4142         r = amdgpu_ib_ring_tests(adev);
4143         amdgpu_amdkfd_post_reset(adev);
4144
4145 error:
4146         amdgpu_virt_release_full_gpu(adev, true);
4147         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4148                 amdgpu_inc_vram_lost(adev);
4149                 r = amdgpu_device_recover_vram(adev);
4150         }
4151
4152         return r;
4153 }
4154
4155 /**
4156  * amdgpu_device_has_job_running - check if there is any job in mirror list
4157  *
4158  * @adev: amdgpu_device pointer
4159  *
4160  * Check if there is any job in the mirror (pending) list.
4161  */
4162 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4163 {
4164         int i;
4165         struct drm_sched_job *job;
4166
4167         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4168                 struct amdgpu_ring *ring = adev->rings[i];
4169
4170                 if (!ring || !ring->sched.thread)
4171                         continue;
4172
4173                 spin_lock(&ring->sched.job_list_lock);
4174                 job = list_first_entry_or_null(&ring->sched.pending_list,
4175                                                struct drm_sched_job, list);
4176                 spin_unlock(&ring->sched.job_list_lock);
4177                 if (job)
4178                         return true;
4179         }
4180         return false;
4181 }
4182
4183 /**
4184  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4185  *
4186  * @adev: amdgpu_device pointer
4187  *
4188  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4189  * a hung GPU.
4190  */
4191 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4192 {
4193         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4194                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4195                 return false;
4196         }
4197
4198         if (amdgpu_gpu_recovery == 0)
4199                 goto disabled;
4200
4201         if (amdgpu_sriov_vf(adev))
4202                 return true;
4203
4204         if (amdgpu_gpu_recovery == -1) {
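                /* auto mode: recovery defaults to enabled only for the
                 * ASICs listed below */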
4205                 switch (adev->asic_type) {
4206                 case CHIP_BONAIRE:
4207                 case CHIP_HAWAII:
4208                 case CHIP_TOPAZ:
4209                 case CHIP_TONGA:
4210                 case CHIP_FIJI:
4211                 case CHIP_POLARIS10:
4212                 case CHIP_POLARIS11:
4213                 case CHIP_POLARIS12:
4214                 case CHIP_VEGAM:
4215                 case CHIP_VEGA20:
4216                 case CHIP_VEGA10:
4217                 case CHIP_VEGA12:
4218                 case CHIP_RAVEN:
4219                 case CHIP_ARCTURUS:
4220                 case CHIP_RENOIR:
4221                 case CHIP_NAVI10:
4222                 case CHIP_NAVI14:
4223                 case CHIP_NAVI12:
4224                 case CHIP_SIENNA_CICHLID:
4225                 case CHIP_NAVY_FLOUNDER:
4226                 case CHIP_DIMGREY_CAVEFISH:
4227                         break;
4228                 default:
4229                         goto disabled;
4230                 }
4231         }
4232
4233         return true;
4234
4235 disabled:
4236         dev_info(adev->dev, "GPU recovery disabled.\n");
4237         return false;
4238 }
4239
4241 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4242                                         struct amdgpu_job *job,
4243                                         bool *need_full_reset_arg)
4244 {
4245         int i, r = 0;
4246         bool need_full_reset  = *need_full_reset_arg;
4247
4248         amdgpu_debugfs_wait_dump(adev);
4249
4250         if (amdgpu_sriov_vf(adev)) {
4251                 /* stop the data exchange thread */
4252                 amdgpu_virt_fini_data_exchange(adev);
4253         }
4254
4255         /* block all schedulers and reset given job's ring */
4256         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4257                 struct amdgpu_ring *ring = adev->rings[i];
4258
4259                 if (!ring || !ring->sched.thread)
4260                         continue;
4261
4262                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4263                 amdgpu_fence_driver_force_completion(ring);
4264         }
4265
4266         if (job)
4267                 drm_sched_increase_karma(&job->base);
4268
4269         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4270         if (!amdgpu_sriov_vf(adev)) {
4271
4272                 if (!need_full_reset)
4273                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4274
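                /*
                 * Try the cheaper per-IP soft reset first; if it fails,
                 * or any block is still hung afterwards, fall back to a
                 * full ASIC reset.
                 */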
4275                 if (!need_full_reset) {
4276                         amdgpu_device_ip_pre_soft_reset(adev);
4277                         r = amdgpu_device_ip_soft_reset(adev);
4278                         amdgpu_device_ip_post_soft_reset(adev);
4279                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4280                                 dev_info(adev->dev, "soft reset failed, falling back to full reset!\n");
4281                                 need_full_reset = true;
4282                         }
4283                 }
4284
4285                 if (need_full_reset)
4286                         r = amdgpu_device_ip_suspend(adev);
4287
4288                 *need_full_reset_arg = need_full_reset;
4289         }
4290
4291         return r;
4292 }
4293
4294 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4295                                struct list_head *device_list_handle,
4296                                bool *need_full_reset_arg,
4297                                bool skip_hw_reset)
4298 {
4299         struct amdgpu_device *tmp_adev = NULL;
4300         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4301         int r = 0;
4302
4303         /*
4304          * ASIC reset has to be done on all XGMI hive nodes ASAP
4305          * to allow proper links negotiation in FW (within 1 sec)
4306          */
4307         if (!skip_hw_reset && need_full_reset) {
4308                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4309                         /* For XGMI run all resets in parallel to speed up the process */
4310                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4311                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4312                                         r = -EALREADY;
4313                         } else
4314                                 r = amdgpu_asic_reset(tmp_adev);
4315
4316                         if (r) {
4317                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4318                                          r, adev_to_drm(tmp_adev)->unique);
4319                                 break;
4320                         }
4321                 }
4322
4323                 /* For XGMI wait for all resets to complete before proceed */
4324                 if (!r) {
4325                         list_for_each_entry(tmp_adev, device_list_handle,
4326                                             gmc.xgmi.head) {
4327                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4328                                         flush_work(&tmp_adev->xgmi_reset_work);
4329                                         r = tmp_adev->asic_reset_res;
4330                                         if (r)
4331                                                 break;
4332                                 }
4333                         }
4334                 }
4335         }
4336
4337         if (!r && amdgpu_ras_intr_triggered()) {
4338                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4339                         if (tmp_adev->mmhub.funcs &&
4340                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4341                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4342                 }
4343
4344                 amdgpu_ras_intr_cleared();
4345         }
4346
4347         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4348                 if (need_full_reset) {
4349                         /* post card */
4350                         if (amdgpu_device_asic_init(tmp_adev))
4351                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4352
4353                         if (!r) {
4354                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4355                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4356                                 if (r)
4357                                         goto out;
4358
4359                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4360                                 if (vram_lost) {
4361                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4362                                         amdgpu_inc_vram_lost(tmp_adev);
4363                                 }
4364
4365                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4366                                 if (r)
4367                                         goto out;
4368
4369                                 r = amdgpu_device_fw_loading(tmp_adev);
4370                                 if (r)
4371                                         return r;
4372
4373                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4374                                 if (r)
4375                                         goto out;
4376
4377                                 if (vram_lost)
4378                                         amdgpu_device_fill_reset_magic(tmp_adev);
4379
4380                                 /*
4381                                  * Add this ASIC back as tracked since the reset
4382                                  * has already completed successfully.
4383                                  */
4384                                 amdgpu_register_gpu_instance(tmp_adev);
4385
4386                                 r = amdgpu_device_ip_late_init(tmp_adev);
4387                                 if (r)
4388                                         goto out;
4389
4390                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4391
4392                                 /*
4393                                  * The GPU enters a bad state once the number
4394                                  * of faulty pages flagged by ECC reaches the
4395                                  * threshold, and RAS recovery is scheduled
4396                                  * next. So add a check here to break recovery
4397                                  * if the bad page threshold has indeed been
4398                                  * exceeded, and remind the user to either
4399                                  * retire this GPU or set a bigger
4400                                  * bad_page_threshold value when probing the driver again.
4401                                  */
4402                                 if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
4403                                         /* must succeed. */
4404                                         amdgpu_ras_resume(tmp_adev);
4405                                 } else {
4406                                         r = -EINVAL;
4407                                         goto out;
4408                                 }
4409
4410                                 /* Update PSP FW topology after reset */
4411                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4412                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4413                         }
4414                 }
4415
4416 out:
4417                 if (!r) {
4418                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4419                         r = amdgpu_ib_ring_tests(tmp_adev);
4420                         if (r) {
4421                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4422                                 r = amdgpu_device_ip_suspend(tmp_adev);
4423                                 need_full_reset = true;
4424                                 r = -EAGAIN;
4425                                 goto end;
4426                         }
4427                 }
4428
4429                 if (!r)
4430                         r = amdgpu_device_recover_vram(tmp_adev);
4431                 else
4432                         tmp_adev->asic_reset_res = r;
4433         }
4434
4435 end:
4436         *need_full_reset_arg = need_full_reset;
4437         return r;
4438 }
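
/*
 * Editorial sketch (not part of the driver): the XGMI path above uses a
 * fan-out/join pattern: queue one reset work item per hive node, then flush
 * them all before collecting the per-node results. A stripped-down version of
 * that pattern, with illustrative names and error handling:
 */
#if 0
static int example_parallel_hive_reset(struct list_head *devs)
{
        struct amdgpu_device *tmp_adev;
        int r = 0;

        /* fan out: one reset work item per node */
        list_for_each_entry(tmp_adev, devs, gmc.xgmi.head) {
                if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
                        return -EALREADY;
        }

        /* join: wait for every node, keep the first error seen */
        list_for_each_entry(tmp_adev, devs, gmc.xgmi.head) {
                flush_work(&tmp_adev->xgmi_reset_work);
                if (!r)
                        r = tmp_adev->asic_reset_res;
        }

        return r;
}
#endif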
4439
4440 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4441                                 struct amdgpu_hive_info *hive)
4442 {
4443         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4444                 return false;
4445
4446         if (hive) {
4447                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4448         } else {
4449                 down_write(&adev->reset_sem);
4450         }
4451
4452         switch (amdgpu_asic_reset_method(adev)) {
4453         case AMD_RESET_METHOD_MODE1:
4454                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4455                 break;
4456         case AMD_RESET_METHOD_MODE2:
4457                 adev->mp1_state = PP_MP1_STATE_RESET;
4458                 break;
4459         default:
4460                 adev->mp1_state = PP_MP1_STATE_NONE;
4461                 break;
4462         }
4463
4464         return true;
4465 }
4466
4467 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4468 {
4469         amdgpu_vf_error_trans_all(adev);
4470         adev->mp1_state = PP_MP1_STATE_NONE;
4471         atomic_set(&adev->in_gpu_reset, 0);
4472         up_write(&adev->reset_sem);
4473 }
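
/*
 * Editorial sketch (not part of the driver): the two helpers above are meant
 * to bracket a reset-critical section. The trylock-style boolean return lets
 * a caller bail out when another reset already owns the device, which is how
 * amdgpu_device_gpu_recover() below uses them.
 */
#if 0
static int example_locked_reset_section(struct amdgpu_device *adev)
{
        if (!amdgpu_device_lock_adev(adev, NULL))
                return -EAGAIN; /* another reset is already in flight */

        /* ... reset-critical work goes here ... */

        amdgpu_device_unlock_adev(adev);
        return 0;
}
#endif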
4474
4475 /*
4476  * Lock a list of amdgpu devices in a hive safely. If this is not a hive
4477  * with multiple nodes, it behaves like amdgpu_device_lock_adev.
4478  *
4479  * Unlocking won't require a roll back.
4480  */
4481 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4482 {
4483         struct amdgpu_device *tmp_adev = NULL;
4484
4485         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4486                 if (!hive) {
4487                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4488                         return -ENODEV;
4489                 }
4490                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4491                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4492                                 goto roll_back;
4493                 }
4494         } else if (!amdgpu_device_lock_adev(adev, hive))
4495                 return -EAGAIN;
4496
4497         return 0;
4498 roll_back:
4499         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4500                 /*
4501                  * if the lock iteration broke in the middle of a hive,
4502                  * it may mean there is a race issue, or that a hive
4503                  * device was locked up independently. We may or may not
4504                  * be in trouble, so try to roll back the locks taken so
4505                  * far and give out a warning.
4506                  */
4507                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4508                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4509                         amdgpu_device_unlock_adev(tmp_adev);
4510                 }
4511         }
4512         return -EAGAIN;
4513 }
4514
4515 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4516 {
4517         struct pci_dev *p = NULL;
4518
4519         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4520                         adev->pdev->bus->number, 1);
4521         if (p) {
4522                 pm_runtime_enable(&(p->dev));
4523                 pm_runtime_resume(&(p->dev));
4524         }
4525 }
4526
4527 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4528 {
4529         enum amd_reset_method reset_method;
4530         struct pci_dev *p = NULL;
4531         u64 expires;
4532
4533         /*
4534          * For now, only BACO and mode1 reset are confirmed
4535          * to suffer the audio issue without being properly suspended.
4536          */
4537         reset_method = amdgpu_asic_reset_method(adev);
4538         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4539              (reset_method != AMD_RESET_METHOD_MODE1))
4540                 return -EINVAL;
4541
4542         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4543                         adev->pdev->bus->number, 1);
4544         if (!p)
4545                 return -ENODEV;
4546
4547         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4548         if (!expires)
4549                 /*
4550                  * If we cannot get the audio device autosuspend delay,
4551                  * a fixed 4s interval will be used. Since 3s is the
4552                  * audio controller's default autosuspend delay setting,
4553                  * the 4s used here is guaranteed to cover it.
4554                  */
4555                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4556
4557         while (!pm_runtime_status_suspended(&(p->dev))) {
4558                 if (!pm_runtime_suspend(&(p->dev)))
4559                         break;
4560
4561                 if (expires < ktime_get_mono_fast_ns()) {
4562                         dev_warn(adev->dev, "failed to suspend display audio\n");
4563                         /* TODO: abort the succeeding gpu reset? */
4564                         return -ETIMEDOUT;
4565                 }
4566         }
4567
4568         pm_runtime_disable(&(p->dev));
4569
4570         return 0;
4571 }
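
/*
 * Editorial sketch (not part of the driver): the two audio helpers above are
 * used as a conditional pair: only resume the audio function if we actually
 * managed to suspend it before the reset, mirroring the audio_suspended flag
 * handling in amdgpu_device_gpu_recover() below.
 */
#if 0
static void example_audio_quiesce(struct amdgpu_device *adev)
{
        bool audio_suspended = !amdgpu_device_suspend_display_audio(adev);

        /* ... perform the GPU reset here ... */

        if (audio_suspended)
                amdgpu_device_resume_display_audio(adev);
}
#endif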
4572
4573 /**
4574  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4575  *
4576  * @adev: amdgpu_device pointer
4577  * @job: which job triggered the hang
4578  *
4579  * Attempt to reset the GPU if it has hung (all asics).
4580  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
4581  * Returns 0 for success or an error on failure.
4582  */
4583
4584 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4585                               struct amdgpu_job *job)
4586 {
4587         struct list_head device_list, *device_list_handle =  NULL;
4588         bool need_full_reset = false;
4589         bool job_signaled = false;
4590         struct amdgpu_hive_info *hive = NULL;
4591         struct amdgpu_device *tmp_adev = NULL;
4592         int i, r = 0;
4593         bool need_emergency_restart = false;
4594         bool audio_suspended = false;
4595
4596         /*
4597          * Special case: RAS triggered and full reset isn't supported
4598          */
4599         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4600
4601         /*
4602          * Flush RAM to disk so that after reboot
4603          * the user can read the log and see why the system rebooted.
4604          */
4605         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4606                 DRM_WARN("Emergency reboot.");
4607
4608                 ksys_sync_helper();
4609                 emergency_restart();
4610         }
4611
4612         dev_info(adev->dev, "GPU %s begin!\n",
4613                 need_emergency_restart ? "jobs stop":"reset");
4614
4615         /*
4616          * Here we trylock to avoid a chain of resets executing from
4617          * either jobs triggered on different adevs in an XGMI hive or
4618          * jobs on different schedulers for the same device while this
4619          * TO handler is running. We always reset all schedulers for a
4620          * device and all devices in an XGMI hive, so that covers them too.
4621          */
4622         hive = amdgpu_get_xgmi_hive(adev);
4623         if (hive) {
4624                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4625                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4626                                 job ? job->base.id : -1, hive->hive_id);
4627                         amdgpu_put_xgmi_hive(hive);
4628                         if (job)
4629                                 drm_sched_increase_karma(&job->base);
4630                         return 0;
4631                 }
4632                 mutex_lock(&hive->hive_lock);
4633         }
4634
4635         /*
4636          * lock the device before we try to operate on the linked list;
4637          * if we didn't get the device lock, don't touch the linked list
4638          * since others may be iterating over it.
4639          */
4640         r = amdgpu_device_lock_hive_adev(adev, hive);
4641         if (r) {
4642                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4643                                         job ? job->base.id : -1);
4644
4645                 /* even though we skipped this reset, we still need to mark the job guilty */
4646                 if (job)
4647                         drm_sched_increase_karma(&job->base);
4648                 goto skip_recovery;
4649         }
4650
4651         /*
4652          * Build list of devices to reset.
4653          * In case we are in XGMI hive mode, resort the device list
4654          * to put adev in the 1st position.
4655          */
4656         INIT_LIST_HEAD(&device_list);
4657         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4658                 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4659                         list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4660                 device_list_handle = &hive->device_list;
4661         } else {
4662                 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4663                 device_list_handle = &device_list;
4664         }
4665
4666         /* block all schedulers and reset given job's ring */
4667         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4668                 /*
4669                  * Try to put the audio codec into suspend state
4670                  * before the gpu reset is started.
4671                  *
4672                  * The power domain of the graphics device is
4673                  * shared with the AZ power domain. Without this,
4674                  * we may change the audio hardware behind the
4675                  * audio driver's back, which would trigger
4676                  * some audio codec errors.
4677                  */
4678                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4679                         audio_suspended = true;
4680
4681                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4682
4683                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4684
4685                 if (!amdgpu_sriov_vf(tmp_adev))
4686                         amdgpu_amdkfd_pre_reset(tmp_adev);
4687
4688                 /*
4689                  * Mark these ASICs to be reset as untracked first,
4690                  * and add them back after the reset has completed
4691                  */
4692                 amdgpu_unregister_gpu_instance(tmp_adev);
4693
4694                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4695
4696                 /* disable ras on ALL IPs */
4697                 if (!need_emergency_restart &&
4698                       amdgpu_device_ip_need_full_reset(tmp_adev))
4699                         amdgpu_ras_suspend(tmp_adev);
4700
4701                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4702                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4703
4704                         if (!ring || !ring->sched.thread)
4705                                 continue;
4706
4707                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4708
4709                         if (need_emergency_restart)
4710                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4711                 }
4712                 atomic_inc(&tmp_adev->gpu_reset_counter);
4713         }
4714
4715         if (need_emergency_restart)
4716                 goto skip_sched_resume;
4717
4718         /*
4719          * Must check guilty signal here since after this point all old
4720          * HW fences are force signaled.
4721          *
4722          * job->base holds a reference to parent fence
4723          */
4724         if (job && job->base.s_fence->parent &&
4725             dma_fence_is_signaled(job->base.s_fence->parent)) {
4726                 job_signaled = true;
4727                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4728                 goto skip_hw_reset;
4729         }
4730
4731 retry:  /* Pre ASIC reset for all the adevs in the XGMI hive. */
4732         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4733                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4734                                                  (tmp_adev == adev) ? job : NULL,
4735                                                  &need_full_reset);
4736                 /* TODO: Should we stop? */
4737                 if (r) {
4738                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4739                                   r, adev_to_drm(tmp_adev)->unique);
4740                         tmp_adev->asic_reset_res = r;
4741                 }
4742         }
4743
4744         /* Actual ASIC resets if needed. */
4745         /* TODO Implement XGMI hive reset logic for SRIOV */
4746         if (amdgpu_sriov_vf(adev)) {
4747                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4748                 if (r)
4749                         adev->asic_reset_res = r;
4750         } else {
4751                 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
4752                 if (r == -EAGAIN)
4753                         goto retry;
4754         }
4755
4756 skip_hw_reset:
4757
4758         /* Post ASIC reset for all devs. */
4759         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4760
4761                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4762                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4763
4764                         if (!ring || !ring->sched.thread)
4765                                 continue;
4766
4767                         /* No point in resubmitting jobs if we didn't HW reset */
4768                         if (!tmp_adev->asic_reset_res && !job_signaled)
4769                                 drm_sched_resubmit_jobs(&ring->sched);
4770
4771                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4772                 }
4773
4774                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4775                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4776                 }
4777
4778                 tmp_adev->asic_reset_res = 0;
4779
4780                 if (r) {
4781                         /* bad news, how do we tell it to userspace? */
4782                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4783                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4784                 } else {
4785                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4786                 }
4787         }
4788
4789 skip_sched_resume:
4790         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4791                 /* unlock kfd: SRIOV would do it separately */
4792                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4793                         amdgpu_amdkfd_post_reset(tmp_adev);
4794                 if (audio_suspended)
4795                         amdgpu_device_resume_display_audio(tmp_adev);
4796                 amdgpu_device_unlock_adev(tmp_adev);
4797         }
4798
4799 skip_recovery:
4800         if (hive) {
4801                 atomic_set(&hive->in_reset, 0);
4802                 mutex_unlock(&hive->hive_lock);
4803                 amdgpu_put_xgmi_hive(hive);
4804         }
4805
4806         if (r && r != -EAGAIN)
4807                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4808         return r;
4809 }
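
/*
 * Editorial sketch (not part of the driver): amdgpu_device_gpu_recover() is
 * typically entered from a drm_sched timeout handler, which hands over the
 * hanging job. The real handler lives in amdgpu_job.c; this simplified
 * illustration elides its return-status plumbing.
 */
#if 0
static void example_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);

        /* kick off full recovery, blaming the job that timed out */
        amdgpu_device_gpu_recover(ring->adev, job);
}
#endif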
4810
4811 /**
4812  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
4813  *
4814  * @adev: amdgpu_device pointer
4815  *
4816  * Fetches and stores in the driver the PCIE capabilities (gen speed
4817  * and lanes) of the slot the device is in. Handles APUs and
4818  * virtualized environments where PCIE config space may not be available.
4819  */
4820 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4821 {
4822         struct pci_dev *pdev;
4823         enum pci_bus_speed speed_cap, platform_speed_cap;
4824         enum pcie_link_width platform_link_width;
4825
4826         if (amdgpu_pcie_gen_cap)
4827                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4828
4829         if (amdgpu_pcie_lane_cap)
4830                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4831
4832         /* covers APUs as well */
4833         if (pci_is_root_bus(adev->pdev->bus)) {
4834                 if (adev->pm.pcie_gen_mask == 0)
4835                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4836                 if (adev->pm.pcie_mlw_mask == 0)
4837                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4838                 return;
4839         }
4840
4841         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4842                 return;
4843
4844         pcie_bandwidth_available(adev->pdev, NULL,
4845                                  &platform_speed_cap, &platform_link_width);
4846
4847         if (adev->pm.pcie_gen_mask == 0) {
4848                 /* asic caps */
4849                 pdev = adev->pdev;
4850                 speed_cap = pcie_get_speed_cap(pdev);
4851                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4852                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4853                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4854                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4855                 } else {
4856                         if (speed_cap == PCIE_SPEED_32_0GT)
4857                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4858                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4859                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4860                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4861                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
4862                         else if (speed_cap == PCIE_SPEED_16_0GT)
4863                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4864                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4865                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4866                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4867                         else if (speed_cap == PCIE_SPEED_8_0GT)
4868                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4869                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4870                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4871                         else if (speed_cap == PCIE_SPEED_5_0GT)
4872                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4873                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4874                         else
4875                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4876                 }
4877                 /* platform caps */
4878                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4879                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4880                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4881                 } else {
4882                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
4883                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4884                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4885                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4886                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4887                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
4888                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
4889                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4890                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4891                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4892                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4893                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4894                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4895                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4896                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4897                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4898                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4899                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4900                         else
4901                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4902
4903                 }
4904         }
4905         if (adev->pm.pcie_mlw_mask == 0) {
4906                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4907                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4908                 } else {
4909                         switch (platform_link_width) {
4910                         case PCIE_LNK_X32:
4911                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4912                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4913                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4914                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4915                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4916                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4917                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4918                                 break;
4919                         case PCIE_LNK_X16:
4920                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4921                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4922                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4923                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4924                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4925                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4926                                 break;
4927                         case PCIE_LNK_X12:
4928                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4929                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4930                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4931                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4932                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4933                                 break;
4934                         case PCIE_LNK_X8:
4935                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4936                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4937                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4938                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4939                                 break;
4940                         case PCIE_LNK_X4:
4941                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4942                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4943                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4944                                 break;
4945                         case PCIE_LNK_X2:
4946                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4947                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4948                                 break;
4949                         case PCIE_LNK_X1:
4950                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4951                                 break;
4952                         default:
4953                                 break;
4954                         }
4955                 }
4956         }
4957 }
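
/*
 * Editorial sketch (not part of the driver): a hypothetical consumer of the
 * masks computed above. A link speed is usable only if both the ASIC and the
 * platform bits for that generation are set in pcie_gen_mask.
 */
#if 0
static bool example_supports_pcie_gen3(struct amdgpu_device *adev)
{
        return (adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
               (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
}
#endif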
4958
4959 int amdgpu_device_baco_enter(struct drm_device *dev)
4960 {
4961         struct amdgpu_device *adev = drm_to_adev(dev);
4962         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4963
4964         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4965                 return -ENOTSUPP;
4966
4967         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4968                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4969
4970         return amdgpu_dpm_baco_enter(adev);
4971 }
4972
4973 int amdgpu_device_baco_exit(struct drm_device *dev)
4974 {
4975         struct amdgpu_device *adev = drm_to_adev(dev);
4976         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4977         int ret = 0;
4978
4979         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4980                 return -ENOTSUPP;
4981
4982         ret = amdgpu_dpm_baco_exit(adev);
4983         if (ret)
4984                 return ret;
4985
4986         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4987                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4988
4989         return 0;
4990 }
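
/*
 * Editorial sketch (not part of the driver): BACO entry and exit are meant to
 * be paired, e.g. around a runtime-suspended period, and failures from either
 * side must be propagated to the caller.
 */
#if 0
static int example_baco_cycle(struct drm_device *dev)
{
        int r = amdgpu_device_baco_enter(dev);

        if (r)
                return r;

        /* ... device sits in BACO here, e.g. while runtime suspended ... */

        return amdgpu_device_baco_exit(dev);
}
#endif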
4991
4992 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
4993 {
4994         int i;
4995
4996         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4997                 struct amdgpu_ring *ring = adev->rings[i];
4998
4999                 if (!ring || !ring->sched.thread)
5000                         continue;
5001
5002                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5003         }
5004 }
5005
5006 /**
5007  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5008  * @pdev: PCI device struct
5009  * @state: PCI channel state
5010  *
5011  * Description: Called when a PCI error is detected.
5012  *
5013  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5014  */
5015 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5016 {
5017         struct drm_device *dev = pci_get_drvdata(pdev);
5018         struct amdgpu_device *adev = drm_to_adev(dev);
5019         int i;
5020
5021         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5022
5023         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5024                 DRM_WARN("No support for XGMI hive yet...");
5025                 return PCI_ERS_RESULT_DISCONNECT;
5026         }
5027
5028         switch (state) {
5029         case pci_channel_io_normal:
5030                 return PCI_ERS_RESULT_CAN_RECOVER;
5031         /* Fatal error, prepare for slot reset */
5032         case pci_channel_io_frozen:
5033                 /*
5034                  * Cancel and wait for all TDRs in progress if failing to
5035                  * set adev->in_gpu_reset in amdgpu_device_lock_adev
5036                  *
5037                  * Locking adev->reset_sem will prevent any external access
5038                  * to GPU during PCI error recovery
5039                  */
5040                 while (!amdgpu_device_lock_adev(adev, NULL))
5041                         amdgpu_cancel_all_tdr(adev);
5042
5043                 /*
5044                  * Block any work scheduling as we do for regular GPU reset
5045                  * for the duration of the recovery
5046                  */
5047                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5048                         struct amdgpu_ring *ring = adev->rings[i];
5049
5050                         if (!ring || !ring->sched.thread)
5051                                 continue;
5052
5053                         drm_sched_stop(&ring->sched, NULL);
5054                 }
5055                 atomic_inc(&adev->gpu_reset_counter);
5056                 return PCI_ERS_RESULT_NEED_RESET;
5057         case pci_channel_io_perm_failure:
5058                 /* Permanent error, prepare for device removal */
5059                 return PCI_ERS_RESULT_DISCONNECT;
5060         }
5061
5062         return PCI_ERS_RESULT_NEED_RESET;
5063 }
5064
5065 /**
5066  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5067  * @pdev: pointer to PCI device
5068  */
5069 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5070 {
5071
5072         DRM_INFO("PCI error: mmio enabled callback!!\n");
5073
5074         /* TODO - dump whatever for debugging purposes */
5075
5076         /* This is called only if amdgpu_pci_error_detected returns
5077          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5078          * works, so there is no need to reset the slot.
5079          */
5080
5081         return PCI_ERS_RESULT_RECOVERED;
5082 }
5083
5084 /**
5085  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5086  * @pdev: PCI device struct
5087  *
5088  * Description: This routine is called by the pci error recovery
5089  * code after the PCI slot has been reset, just before we
5090  * should resume normal operations.
5091  */
5092 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5093 {
5094         struct drm_device *dev = pci_get_drvdata(pdev);
5095         struct amdgpu_device *adev = drm_to_adev(dev);
5096         int r, i;
5097         bool need_full_reset = true;
5098         u32 memsize;
5099         struct list_head device_list;
5100
5101         DRM_INFO("PCI error: slot reset callback!!\n");
5102
5103         INIT_LIST_HEAD(&device_list);
5104         list_add_tail(&adev->gmc.xgmi.head, &device_list);
5105
5106         /* wait for asic to come out of reset */
5107         msleep(500);
5108
5109         /* Restore PCI config space */
5110         amdgpu_device_load_pci_state(pdev);
5111
5112         /* confirm ASIC came out of reset */
5113         for (i = 0; i < adev->usec_timeout; i++) {
5114                 memsize = amdgpu_asic_get_config_memsize(adev);
5115
5116                 if (memsize != 0xffffffff)
5117                         break;
5118                 udelay(1);
5119         }
5120         if (memsize == 0xffffffff) {
5121                 r = -ETIME;
5122                 goto out;
5123         }
5124
5125         adev->in_pci_err_recovery = true;
5126         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
5127         adev->in_pci_err_recovery = false;
5128         if (r)
5129                 goto out;
5130
5131         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
5132
5133 out:
5134         if (!r) {
5135                 if (amdgpu_device_cache_pci_state(adev->pdev))
5136                         pci_restore_state(adev->pdev);
5137
5138                 DRM_INFO("PCIe error recovery succeeded\n");
5139         } else {
5140                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5141                 amdgpu_device_unlock_adev(adev);
5142         }
5143
5144         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5145 }
5146
5147 /**
5148  * amdgpu_pci_resume() - resume normal ops after PCI reset
5149  * @pdev: pointer to PCI device
5150  *
5151  * Called when the error recovery driver tells us that it's
5152  * OK to resume normal operation.
5153  */
5154 void amdgpu_pci_resume(struct pci_dev *pdev)
5155 {
5156         struct drm_device *dev = pci_get_drvdata(pdev);
5157         struct amdgpu_device *adev = drm_to_adev(dev);
5158         int i;
5159
5161         DRM_INFO("PCI error: resume callback!!\n");
5162
5163         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5164                 struct amdgpu_ring *ring = adev->rings[i];
5165
5166                 if (!ring || !ring->sched.thread)
5167                         continue;
5168
5170                 drm_sched_resubmit_jobs(&ring->sched);
5171                 drm_sched_start(&ring->sched, true);
5172         }
5173
5174         amdgpu_device_unlock_adev(adev);
5175 }
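
/*
 * Editorial sketch (not part of the driver): the four callbacks above are
 * wired into the PCI AER core through a struct pci_error_handlers table; in
 * the real driver that table is referenced from the pci_driver in
 * amdgpu_drv.c. Field names follow the standard PCI error-recovery API.
 */
#if 0
static const struct pci_error_handlers example_amdgpu_pci_err_handler = {
        .error_detected = amdgpu_pci_error_detected,
        .mmio_enabled   = amdgpu_pci_mmio_enabled,
        .slot_reset     = amdgpu_pci_slot_reset,
        .resume         = amdgpu_pci_resume,
};
#endif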
5176
5177 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5178 {
5179         struct drm_device *dev = pci_get_drvdata(pdev);
5180         struct amdgpu_device *adev = drm_to_adev(dev);
5181         int r;
5182
5183         r = pci_save_state(pdev);
5184         if (!r) {
5185                 kfree(adev->pci_state);
5186
5187                 adev->pci_state = pci_store_saved_state(pdev);
5188
5189                 if (!adev->pci_state) {
5190                         DRM_ERROR("Failed to store PCI saved state");
5191                         return false;
5192                 }
5193         } else {
5194                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5195                 return false;
5196         }
5197
5198         return true;
5199 }
5200
5201 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5202 {
5203         struct drm_device *dev = pci_get_drvdata(pdev);
5204         struct amdgpu_device *adev = drm_to_adev(dev);
5205         int r;
5206
5207         if (!adev->pci_state)
5208                 return false;
5209
5210         r = pci_load_saved_state(pdev, adev->pci_state);
5211
5212         if (!r) {
5213                 pci_restore_state(pdev);
5214         } else {
5215                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5216                 return false;
5217         }
5218
5219         return true;
5220 }
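
/*
 * Editorial sketch (not part of the driver): the two helpers above form a
 * save/restore pair: snapshot config space while the device is healthy, then
 * reload it after a slot reset, as amdgpu_pci_slot_reset() does above.
 */
#if 0
static void example_pci_state_roundtrip(struct amdgpu_device *adev)
{
        if (amdgpu_device_cache_pci_state(adev->pdev)) {
                /* ... PCI slot reset happens here ... */
                amdgpu_device_load_pci_state(adev->pdev);
        }
}
#endif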
5221
5222