/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "ALDEBARAN",
        "NAVI10",
        "CYAN_SKILLFISH",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "BEIGE_GOBY",
        "YELLOW_CARP",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

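/*
 * Usage note (illustrative, not part of the driver): the attribute above is
 * exposed through sysfs, so the accumulated replay count can be read from
 * userspace with something like
 *
 *     cat /sys/bus/pci/devices/<bdf>/pcie_replay_count
 *
 * where <bdf> is the device's PCI bus/device/function address and depends
 * on the system.
 */
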
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3 ||
            ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * Smart Shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
        return (amdgpu_device_supports_boco(dev) &&
                amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
                             void *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0, tmp = 0;
        uint32_t *data = buf;
        uint64_t last;
        int idx;

        if (!drm_dev_enter(&adev->ddev, &idx))
                return;

        BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *data++);
                else
                        *data++ = RREG32_NO_KIQ(mmMM_DATA);
        }

        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        drm_dev_exit(idx);
}
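
/*
 * Minimal usage sketch (illustrative, assuming a valid @adev): read one
 * dword at VRAM offset 0x1000 through the MM_INDEX/MM_DATA window:
 *
 *     uint32_t val;
 *
 *     amdgpu_device_mm_access(adev, 0x1000, &val, sizeof(val), false);
 *
 * Both the position and the size must be dword aligned, otherwise the
 * BUG_ON() above fires; callers are expected to guarantee that.
 */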

/**
 * amdgpu_device_aper_access - access vram via the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes actually transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
                                 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
        void __iomem *addr;
        size_t count = 0;
        uint64_t last;

        if (!adev->mman.aper_base_kaddr)
                return 0;

        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                addr = adev->mman.aper_base_kaddr + pos;
                count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_device_flush_hdp(adev, NULL);
                } else {
                        amdgpu_device_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

        }

        return count;
#else
        return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               void *buf, size_t size, bool write)
{
        size_t count;

        /* try using the vram aperture to access vram first */
        count = amdgpu_device_aper_access(adev, pos, buf, size, write);
        size -= count;
        if (size) {
                /* use MM_INDEX/MM_DATA to access the rest of vram */
                pos += count;
                buf += count;
                amdgpu_device_mm_access(adev, pos, buf, size, write);
        }
}
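
/*
 * Usage sketch (illustrative): copy a CPU buffer into VRAM at a
 * hypothetical dword-aligned offset vram_pos. The helper uses the
 * CPU-visible aperture for the leading part and transparently falls back
 * to MM_INDEX/MM_DATA for anything beyond visible VRAM:
 *
 *     u8 data[256] = {};
 *
 *     amdgpu_device_vram_access(adev, vram_pos, data, sizeof(data), true);
 */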

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
        if (adev->no_hw_access)
                return true;

#ifdef CONFIG_LOCKDEP
        /*
         * This is a bit complicated to understand, so worth a comment. What we assert
         * here is that the GPU reset is not running on another thread in parallel.
         *
         * For this we trylock the read side of the reset semaphore, if that succeeds
         * we know that the reset is not running in parallel.
         *
         * If the trylock fails we assert that we are either already holding the read
         * side of the lock or are the reset thread itself and hold the write side of
         * the lock.
         */
        if (in_task()) {
                if (down_read_trylock(&adev->reset_sem))
                        up_read(&adev->reset_sem);
                else
                        lockdep_assert_held(&adev->reset_sem);
        }
#endif
        return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}
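
/*
 * Callers normally reach amdgpu_device_rreg()/amdgpu_device_wreg() through
 * the register access macros rather than directly. A minimal sketch
 * (illustrative, reg_offset being a hypothetical dword register index):
 *
 *     u32 v = amdgpu_device_rreg(adev, reg_offset, 0);
 *     u32 raw = amdgpu_device_rreg(adev, reg_offset, AMDGPU_REGS_NO_KIQ);
 *
 * Passing AMDGPU_REGS_NO_KIQ skips the KIQ path even when running as an
 * SR-IOV VF, going straight to MMIO instead.
 */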

/*
 * MMIO register byte read helper.
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register byte write helper.
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/*
 * amdgpu_mm_wreg_mmio_rlc - write a register either through mmio or through
 * the RLC path if it is in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (amdgpu_device_skip_hw_access(adev))
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (amdgpu_device_skip_hw_access(adev))
                return;

        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
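
/*
 * Usage sketch (illustrative): rings publish their write pointers through
 * these helpers. For a hypothetical doorbell index db_index:
 *
 *     amdgpu_mm_wdoorbell64(adev, db_index, wptr);
 *     u64 cur = amdgpu_mm_rdoorbell64(adev, db_index);
 *
 * The qword variants are for VEGA10 and newer; out-of-range indices are
 * rejected with a DRM_ERROR instead of a BUG.
 */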

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
                                  u32 reg_addr)
{
        unsigned long flags;
        u64 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
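
/*
 * The 64 bit read above is two 32 bit window accesses, low dword first. An
 * open-coded equivalent would look like this sketch (illustrative only):
 *
 *     u64 v;
 *
 *     v = amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg_addr);
 *     v |= (u64)amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data,
 *                                           reg_addr + 4) << 32;
 *
 * Unlike the sketch, the real helper performs both halves under a single
 * pcie_idx_lock acquisition, so the two dwords cannot be torn apart by a
 * concurrent indirect access.
 */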

/**
 * amdgpu_device_indirect_wreg - write to an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write to a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        amdgpu_asic_pre_asic_init(adev);

        return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
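
/*
 * The register array is consumed as {offset, and_mask, or_mask} triples.
 * A hypothetical golden-register table might look like this (illustrative
 * offsets and values only):
 *
 *     static const u32 golden_settings_example[] = {
 *             0x31e8, 0xffffffff, 0x00000002,
 *             0x31ec, 0x0000000f, 0x00000001,
 *     };
 *
 *     amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *                                             ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff writes or_mask verbatim (first entry); any
 * other mask does a read-modify-write confined to the masked bits (the
 * second entry only touches bits 3:0).
 */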

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             adev->doorbell_index.max_assignment+1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        /* For Vega, reserve and map two pages on the doorbell BAR since the
         * SDMA paging queue doorbell uses the second page. The
         * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
         * doorbells are in the first page, so with the paging queue enabled
         * num_doorbells needs to grow by one page (0x400 dwords).
         */
        if (adev->asic_type >= CHIP_VEGA10)
                adev->doorbell.num_doorbells += 0x400;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}
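
/*
 * Worked example (illustrative numbers): with a 2 MiB doorbell BAR,
 * size / sizeof(u32) allows 0x80000 dword slots, so num_doorbells is in
 * practice capped by max_assignment + 1. On VEGA10 and newer one extra
 * page of dwords (0x400) is then added so that the SDMA paging queue
 * doorbell, which lives in the second page, is covered by the ioremap().
 */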

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
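
/*
 * Usage sketch (illustrative): a ring that needs a writeback slot for its
 * read pointer would do something like
 *
 *     u32 wb;
 *     int r = amdgpu_device_wb_get(adev, &wb);
 *
 *     if (r)
 *             return r;
 *     rptr_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *     rptr_cpu = &adev->wb.wb[wb];
 *     ...
 *     amdgpu_device_wb_free(adev, wb);
 *
 * rptr_gpu_addr and rptr_cpu are hypothetical destinations; the index
 * handed out is already a dword offset, since the allocator shifts the
 * bit number by 3 to hand out 256 bit aligned slots.
 */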

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed after a hw reset.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
                 * some old smc fw still needs the driver to do vPost, otherwise the gpu hangs.
                 * smc fw versions above 22.15 don't have this flaw, so we force
                 * vpost to be executed for smc versions below 22.15
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        /* Don't post if we need to reset whole hive on init */
        if (adev->gmc.xgmi.pending_reset)
                return false;

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
                bool state)
{
        struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}
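
/*
 * Worked example (illustrative): with 4 KiB pages the low 12 bits of an
 * address are the in-page offset. amdgpu_vm_block_size=9 then gives
 * 2^9 = 512 PTEs per page table, so each page directory entry covers
 * 512 * 4 KiB = 2 MiB of address space; the remaining high bits index
 * the page directory itself.
 */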

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
                (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
        if (!(adev->flags & AMD_IS_APU) ||
            adev->asic_type < CHIP_RAVEN)
                return 0;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->pdev->device == 0x15dd)
                        adev->apu_flags |= AMD_APU_IS_RAVEN;
                if (adev->pdev->device == 0x15d8)
                        adev->apu_flags |= AMD_APU_IS_PICASSO;
                break;
        case CHIP_RENOIR:
                if ((adev->pdev->device == 0x1636) ||
                    (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
                break;
        case CHIP_VANGOGH:
                adev->apu_flags |= AMD_APU_IS_VANGOGH;
                break;
        case CHIP_YELLOW_CARP:
                break;
        case CHIP_CYAN_SKILLFISH:
                if (adev->pdev->device == 0x13FE)
                        adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        amdgpu_gmc_tmz_set(adev);

        amdgpu_gmc_noretry_set(adev);

        return 0;
}
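
/*
 * Example of the clamping above (illustrative): booting with
 * amdgpu.sched_jobs=6 warns and rounds the value up to 8, while
 * amdgpu.sched_jobs=2 warns and raises it to the minimum of 4. The same
 * power-of-two rounding is applied to amdgpu.sched_hw_submission.
 */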

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};
1590
1591 /**
1592  * amdgpu_device_ip_set_clockgating_state - set the CG state
1593  *
1594  * @dev: amdgpu_device pointer
1595  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1596  * @state: clockgating state (gate or ungate)
1597  *
1598  * Sets the requested clockgating state for all instances of
1599  * the hardware IP specified.
1600  * Returns the error code from the last instance.
1601  */
1602 int amdgpu_device_ip_set_clockgating_state(void *dev,
1603                                            enum amd_ip_block_type block_type,
1604                                            enum amd_clockgating_state state)
1605 {
1606         struct amdgpu_device *adev = dev;
1607         int i, r = 0;
1608
1609         for (i = 0; i < adev->num_ip_blocks; i++) {
1610                 if (!adev->ip_blocks[i].status.valid)
1611                         continue;
1612                 if (adev->ip_blocks[i].version->type != block_type)
1613                         continue;
1614                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1615                         continue;
1616                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1617                         (void *)adev, state);
1618                 if (r)
1619                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1620                                   adev->ip_blocks[i].version->funcs->name, r);
1621         }
1622         return r;
1623 }
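
/*
 * Illustrative usage sketch for the helper above (error handling elided):
 * gate clocks for all GFX IP instances to save power, and ungate them
 * again before heavy use.
 *
 *     amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                            AMD_CG_STATE_GATE);
 *     ...
 *     amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                            AMD_CG_STATE_UNGATE);
 */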
1624
1625 /**
1626  * amdgpu_device_ip_set_powergating_state - set the PG state
1627  *
1628  * @dev: amdgpu_device pointer
1629  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1630  * @state: powergating state (gate or ungate)
1631  *
1632  * Sets the requested powergating state for all instances of
1633  * the hardware IP specified.
1634  * Returns the error code from the last instance.
1635  */
1636 int amdgpu_device_ip_set_powergating_state(void *dev,
1637                                            enum amd_ip_block_type block_type,
1638                                            enum amd_powergating_state state)
1639 {
1640         struct amdgpu_device *adev = dev;
1641         int i, r = 0;
1642
1643         for (i = 0; i < adev->num_ip_blocks; i++) {
1644                 if (!adev->ip_blocks[i].status.valid)
1645                         continue;
1646                 if (adev->ip_blocks[i].version->type != block_type)
1647                         continue;
1648                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1649                         continue;
1650                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1651                         (void *)adev, state);
1652                 if (r)
1653                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1654                                   adev->ip_blocks[i].version->funcs->name, r);
1655         }
1656         return r;
1657 }
1658
1659 /**
1660  * amdgpu_device_ip_get_clockgating_state - get the CG state
1661  *
1662  * @adev: amdgpu_device pointer
1663  * @flags: clockgating feature flags
1664  *
1665  * Walks the list of IPs on the device and updates the clockgating
1666  * flags for each IP.
1667  * Updates @flags with the feature flags for each hardware IP where
1668  * clockgating is enabled.
1669  */
1670 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1671                                             u32 *flags)
1672 {
1673         int i;
1674
1675         for (i = 0; i < adev->num_ip_blocks; i++) {
1676                 if (!adev->ip_blocks[i].status.valid)
1677                         continue;
1678                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1679                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1680         }
1681 }
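
/*
 * Illustrative sketch: a caller (e.g. a debugfs handler) can collect the
 * active clockgating features and test individual AMD_CG_SUPPORT_* bits.
 *
 *     u32 flags = 0;
 *
 *     amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *     if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *             ; /* GFX medium grain clockgating is active */
 */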
1682
1683 /**
1684  * amdgpu_device_ip_wait_for_idle - wait for idle
1685  *
1686  * @adev: amdgpu_device pointer
1687  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1688  *
1689  * Waits for the requested hardware IP to be idle.
1690  * Returns 0 for success or a negative error code on failure.
1691  */
1692 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1693                                    enum amd_ip_block_type block_type)
1694 {
1695         int i, r;
1696
1697         for (i = 0; i < adev->num_ip_blocks; i++) {
1698                 if (!adev->ip_blocks[i].status.valid)
1699                         continue;
1700                 if (adev->ip_blocks[i].version->type == block_type) {
1701                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1702                         if (r)
1703                                 return r;
1704                         break;
1705                 }
1706         }
1707         return 0;
1708
1709 }
1710
1711 /**
1712  * amdgpu_device_ip_is_idle - is the hardware IP idle
1713  *
1714  * @adev: amdgpu_device pointer
1715  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1716  *
1717  * Check if the hardware IP is idle or not.
1718  * Returns true if the IP is idle, false if not.
1719  */
1720 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1721                               enum amd_ip_block_type block_type)
1722 {
1723         int i;
1724
1725         for (i = 0; i < adev->num_ip_blocks; i++) {
1726                 if (!adev->ip_blocks[i].status.valid)
1727                         continue;
1728                 if (adev->ip_blocks[i].version->type == block_type)
1729                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1730         }
1731         return true;
1732
1733 }
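
/*
 * Illustrative sketch: the two helpers above combine into a cheap
 * check-then-wait pattern, so a caller only blocks when the IP is
 * actually busy.
 *
 *     int r;
 *
 *     if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX)) {
 *             r = amdgpu_device_ip_wait_for_idle(adev,
 *                                                AMD_IP_BLOCK_TYPE_GFX);
 *             if (r)
 *                     return r; /* IP failed to go idle */
 *     }
 */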
1734
1735 /**
1736  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1737  *
1738  * @adev: amdgpu_device pointer
1739  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1740  *
1741  * Returns a pointer to the hardware IP block structure
1742  * if it exists for the asic, otherwise NULL.
1743  */
1744 struct amdgpu_ip_block *
1745 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1746                               enum amd_ip_block_type type)
1747 {
1748         int i;
1749
1750         for (i = 0; i < adev->num_ip_blocks; i++)
1751                 if (adev->ip_blocks[i].version->type == type)
1752                         return &adev->ip_blocks[i];
1753
1754         return NULL;
1755 }
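
/*
 * Illustrative sketch: look up an IP block and inspect its version, e.g.
 * to branch on the GFX generation present on this asic.
 *
 *     struct amdgpu_ip_block *gfx =
 *             amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *     if (gfx && gfx->version->major >= 10)
 *             ; /* gfx10+ specific path */
 */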
1756
1757 /**
1758  * amdgpu_device_ip_block_version_cmp
1759  *
1760  * @adev: amdgpu_device pointer
1761  * @type: enum amd_ip_block_type
1762  * @major: major version
1763  * @minor: minor version
1764  *
1765  * Returns 0 if the IP block's version is equal to or greater than the
1766  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1767  */
1768 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1769                                        enum amd_ip_block_type type,
1770                                        u32 major, u32 minor)
1771 {
1772         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1773
1774         if (ip_block && ((ip_block->version->major > major) ||
1775                         ((ip_block->version->major == major) &&
1776                         (ip_block->version->minor >= minor))))
1777                 return 0;
1778
1779         return 1;
1780 }
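
/*
 * Illustrative sketch: since the helper above returns 0 for "equal or
 * greater", a minimum-version check reads as follows.
 *
 *     if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                             7, 1))
 *             ; /* SMC 7.1 or newer is present */
 */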
1781
1782 /**
1783  * amdgpu_device_ip_block_add
1784  *
1785  * @adev: amdgpu_device pointer
1786  * @ip_block_version: pointer to the IP to add
1787  *
1788  * Adds the IP block driver information to the collection of IPs
1789  * on the asic.
1790  */
1791 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1792                                const struct amdgpu_ip_block_version *ip_block_version)
1793 {
1794         if (!ip_block_version)
1795                 return -EINVAL;
1796
1797         switch (ip_block_version->type) {
1798         case AMD_IP_BLOCK_TYPE_VCN:
1799                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1800                         return 0;
1801                 break;
1802         case AMD_IP_BLOCK_TYPE_JPEG:
1803                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1804                         return 0;
1805                 break;
1806         default:
1807                 break;
1808         }
1809
1810         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1811                   ip_block_version->funcs->name);
1812
1813         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1814
1815         return 0;
1816 }
1817
1818 /**
1819  * amdgpu_device_enable_virtual_display - enable virtual display feature
1820  *
1821  * @adev: amdgpu_device pointer
1822  *
1823  * Enables the virtual display feature if the user has enabled it via
1824  * the module parameter virtual_display.  This feature provides virtual
1825  * display hardware on headless boards or in virtualized environments.
1826  * This function parses and validates the configuration string specified by
1827  * the user and configures the virtual display configuration (number of
1828  * virtual connectors, crtcs, etc.) specified.
1829  */
1830 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1831 {
1832         adev->enable_virtual_display = false;
1833
1834         if (amdgpu_virtual_display) {
1835                 const char *pci_address_name = pci_name(adev->pdev);
1836                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1837
1838                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1839                 pciaddstr_tmp = pciaddstr;
1840                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1841                         pciaddname = strsep(&pciaddname_tmp, ",");
1842                         if (!strcmp("all", pciaddname)
1843                             || !strcmp(pci_address_name, pciaddname)) {
1844                                 long num_crtc;
1845                                 int res = -1;
1846
1847                                 adev->enable_virtual_display = true;
1848
1849                                 if (pciaddname_tmp)
1850                                         res = kstrtol(pciaddname_tmp, 10,
1851                                                       &num_crtc);
1852
1853                                 if (!res) {
1854                                         if (num_crtc < 1)
1855                                                 num_crtc = 1;
1856                                         if (num_crtc > 6)
1857                                                 num_crtc = 6;
1858                                         adev->mode_info.num_crtc = num_crtc;
1859                                 } else {
1860                                         adev->mode_info.num_crtc = 1;
1861                                 }
1862                                 break;
1863                         }
1864                 }
1865
1866                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1867                          amdgpu_virtual_display, pci_address_name,
1868                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1869
1870                 kfree(pciaddstr);
1871         }
1872 }
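
/*
 * Illustrative examples of the virtual_display module parameter format
 * parsed above: semicolon-separated PCI addresses, each with an optional
 * crtc count after a comma (the addresses shown are hypothetical).
 *
 *     amdgpu.virtual_display=0000:01:00.0,2    # 2 virtual crtcs on one GPU
 *     amdgpu.virtual_display=all,1             # 1 virtual crtc on all GPUs
 *
 * The crtc count is clamped to the 1..6 range enforced in the code above.
 */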
1873
1874 /**
1875  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1876  *
1877  * @adev: amdgpu_device pointer
1878  *
1879  * Parses the asic configuration parameters specified in the gpu info
1880  * firmware and makes them available to the driver for use in configuring
1881  * the asic.
1882  * Returns 0 on success, -EINVAL on failure.
1883  */
1884 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1885 {
1886         const char *chip_name;
1887         char fw_name[40];
1888         int err;
1889         const struct gpu_info_firmware_header_v1_0 *hdr;
1890
1891         adev->firmware.gpu_info_fw = NULL;
1892
1893         if (adev->mman.discovery_bin) {
1894                 amdgpu_discovery_get_gfx_info(adev);
1895
1896                 /*
1897                  * FIXME: The bounding box is still needed by Navi12, so
1898                  * temporarily read it from gpu_info firmware. Should be dropped
1899                  * when DAL no longer needs it.
1900                  */
1901                 if (adev->asic_type != CHIP_NAVI12)
1902                         return 0;
1903         }
1904
1905         switch (adev->asic_type) {
1906 #ifdef CONFIG_DRM_AMDGPU_SI
1907         case CHIP_VERDE:
1908         case CHIP_TAHITI:
1909         case CHIP_PITCAIRN:
1910         case CHIP_OLAND:
1911         case CHIP_HAINAN:
1912 #endif
1913 #ifdef CONFIG_DRM_AMDGPU_CIK
1914         case CHIP_BONAIRE:
1915         case CHIP_HAWAII:
1916         case CHIP_KAVERI:
1917         case CHIP_KABINI:
1918         case CHIP_MULLINS:
1919 #endif
1920         case CHIP_TOPAZ:
1921         case CHIP_TONGA:
1922         case CHIP_FIJI:
1923         case CHIP_POLARIS10:
1924         case CHIP_POLARIS11:
1925         case CHIP_POLARIS12:
1926         case CHIP_VEGAM:
1927         case CHIP_CARRIZO:
1928         case CHIP_STONEY:
1929         case CHIP_VEGA20:
1930         case CHIP_ALDEBARAN:
1931         case CHIP_SIENNA_CICHLID:
1932         case CHIP_NAVY_FLOUNDER:
1933         case CHIP_DIMGREY_CAVEFISH:
1934         case CHIP_BEIGE_GOBY:
1935         default:
1936                 return 0;
1937         case CHIP_VEGA10:
1938                 chip_name = "vega10";
1939                 break;
1940         case CHIP_VEGA12:
1941                 chip_name = "vega12";
1942                 break;
1943         case CHIP_RAVEN:
1944                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1945                         chip_name = "raven2";
1946                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1947                         chip_name = "picasso";
1948                 else
1949                         chip_name = "raven";
1950                 break;
1951         case CHIP_ARCTURUS:
1952                 chip_name = "arcturus";
1953                 break;
1954         case CHIP_RENOIR:
1955                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1956                         chip_name = "renoir";
1957                 else
1958                         chip_name = "green_sardine";
1959                 break;
1960         case CHIP_NAVI10:
1961                 chip_name = "navi10";
1962                 break;
1963         case CHIP_NAVI14:
1964                 chip_name = "navi14";
1965                 break;
1966         case CHIP_NAVI12:
1967                 chip_name = "navi12";
1968                 break;
1969         case CHIP_VANGOGH:
1970                 chip_name = "vangogh";
1971                 break;
1972         case CHIP_YELLOW_CARP:
1973                 chip_name = "yellow_carp";
1974                 break;
1975         }
1976
1977         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1978         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1979         if (err) {
1980                 dev_err(adev->dev,
1981                         "Failed to load gpu_info firmware \"%s\"\n",
1982                         fw_name);
1983                 goto out;
1984         }
1985         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1986         if (err) {
1987                 dev_err(adev->dev,
1988                         "Failed to validate gpu_info firmware \"%s\"\n",
1989                         fw_name);
1990                 goto out;
1991         }
1992
1993         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1994         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1995
1996         switch (hdr->version_major) {
1997         case 1:
1998         {
1999                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2000                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2001                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2002
2003                 /*
2004                  * Should be dropped when DAL no longer needs it.
2005                  */
2006                 if (adev->asic_type == CHIP_NAVI12)
2007                         goto parse_soc_bounding_box;
2008
2009                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2010                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2011                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2012                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2013                 adev->gfx.config.max_texture_channel_caches =
2014                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2015                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2016                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2017                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2018                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2019                 adev->gfx.config.double_offchip_lds_buf =
2020                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2021                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2022                 adev->gfx.cu_info.max_waves_per_simd =
2023                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2024                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2025                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2026                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2027                 if (hdr->version_minor >= 1) {
2028                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2029                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2030                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2031                         adev->gfx.config.num_sc_per_sh =
2032                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2033                         adev->gfx.config.num_packer_per_sc =
2034                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2035                 }
2036
2037 parse_soc_bounding_box:
2038                 /*
2039                  * soc bounding box info is not integrated in the discovery table,
2040                  * so it always needs to be parsed from the gpu info firmware.
2041                  */
2042                 if (hdr->version_minor == 2) {
2043                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2044                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2045                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2046                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2047                 }
2048                 break;
2049         }
2050         default:
2051                 dev_err(adev->dev,
2052                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2053                 err = -EINVAL;
2054                 goto out;
2055         }
2056 out:
2057         return err;
2058 }
2059
2060 /**
2061  * amdgpu_device_ip_early_init - run early init for hardware IPs
2062  *
2063  * @adev: amdgpu_device pointer
2064  *
2065  * Early initialization pass for hardware IPs.  The hardware IPs that make
2066  * up each asic are discovered and each IP's early_init callback is run.  This
2067  * is the first stage in initializing the asic.
2068  * Returns 0 on success, negative error code on failure.
2069  */
2070 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2071 {
2072         int i, r;
2073
2074         amdgpu_device_enable_virtual_display(adev);
2075
2076         if (amdgpu_sriov_vf(adev)) {
2077                 r = amdgpu_virt_request_full_gpu(adev, true);
2078                 if (r)
2079                         return r;
2080         }
2081
2082         switch (adev->asic_type) {
2083 #ifdef CONFIG_DRM_AMDGPU_SI
2084         case CHIP_VERDE:
2085         case CHIP_TAHITI:
2086         case CHIP_PITCAIRN:
2087         case CHIP_OLAND:
2088         case CHIP_HAINAN:
2089                 adev->family = AMDGPU_FAMILY_SI;
2090                 r = si_set_ip_blocks(adev);
2091                 if (r)
2092                         return r;
2093                 break;
2094 #endif
2095 #ifdef CONFIG_DRM_AMDGPU_CIK
2096         case CHIP_BONAIRE:
2097         case CHIP_HAWAII:
2098         case CHIP_KAVERI:
2099         case CHIP_KABINI:
2100         case CHIP_MULLINS:
2101                 if (adev->flags & AMD_IS_APU)
2102                         adev->family = AMDGPU_FAMILY_KV;
2103                 else
2104                         adev->family = AMDGPU_FAMILY_CI;
2105
2106                 r = cik_set_ip_blocks(adev);
2107                 if (r)
2108                         return r;
2109                 break;
2110 #endif
2111         case CHIP_TOPAZ:
2112         case CHIP_TONGA:
2113         case CHIP_FIJI:
2114         case CHIP_POLARIS10:
2115         case CHIP_POLARIS11:
2116         case CHIP_POLARIS12:
2117         case CHIP_VEGAM:
2118         case CHIP_CARRIZO:
2119         case CHIP_STONEY:
2120                 if (adev->flags & AMD_IS_APU)
2121                         adev->family = AMDGPU_FAMILY_CZ;
2122                 else
2123                         adev->family = AMDGPU_FAMILY_VI;
2124
2125                 r = vi_set_ip_blocks(adev);
2126                 if (r)
2127                         return r;
2128                 break;
2129         case CHIP_VEGA10:
2130         case CHIP_VEGA12:
2131         case CHIP_VEGA20:
2132         case CHIP_RAVEN:
2133         case CHIP_ARCTURUS:
2134         case CHIP_RENOIR:
2135         case CHIP_ALDEBARAN:
2136                 if (adev->flags & AMD_IS_APU)
2137                         adev->family = AMDGPU_FAMILY_RV;
2138                 else
2139                         adev->family = AMDGPU_FAMILY_AI;
2140
2141                 r = soc15_set_ip_blocks(adev);
2142                 if (r)
2143                         return r;
2144                 break;
2145         case  CHIP_NAVI10:
2146         case  CHIP_NAVI14:
2147         case  CHIP_NAVI12:
2148         case  CHIP_SIENNA_CICHLID:
2149         case  CHIP_NAVY_FLOUNDER:
2150         case  CHIP_DIMGREY_CAVEFISH:
2151         case  CHIP_BEIGE_GOBY:
2152         case CHIP_VANGOGH:
2153         case CHIP_YELLOW_CARP:
2154         case CHIP_CYAN_SKILLFISH:
2155                 if (adev->asic_type == CHIP_VANGOGH)
2156                         adev->family = AMDGPU_FAMILY_VGH;
2157                 else if (adev->asic_type == CHIP_YELLOW_CARP)
2158                         adev->family = AMDGPU_FAMILY_YC;
2159                 else
2160                         adev->family = AMDGPU_FAMILY_NV;
2161
2162                 r = nv_set_ip_blocks(adev);
2163                 if (r)
2164                         return r;
2165                 break;
2166         default:
2167                 /* FIXME: not supported yet */
2168                 return -EINVAL;
2169         }
2170
2171         amdgpu_amdkfd_device_probe(adev);
2172
2173         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2174         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2175                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2176         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2177                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2178
2179         for (i = 0; i < adev->num_ip_blocks; i++) {
2180                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2181                         DRM_ERROR("disabled ip block: %d <%s>\n",
2182                                   i, adev->ip_blocks[i].version->funcs->name);
2183                         adev->ip_blocks[i].status.valid = false;
2184                 } else {
2185                         if (adev->ip_blocks[i].version->funcs->early_init) {
2186                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2187                                 if (r == -ENOENT) {
2188                                         adev->ip_blocks[i].status.valid = false;
2189                                 } else if (r) {
2190                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2191                                                   adev->ip_blocks[i].version->funcs->name, r);
2192                                         return r;
2193                                 } else {
2194                                         adev->ip_blocks[i].status.valid = true;
2195                                 }
2196                         } else {
2197                                 adev->ip_blocks[i].status.valid = true;
2198                         }
2199                 }
2200                 /* get the vbios after the asic_funcs are set up */
2201                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2202                         r = amdgpu_device_parse_gpu_info_fw(adev);
2203                         if (r)
2204                                 return r;
2205
2206                         /* Read BIOS */
2207                         if (!amdgpu_get_bios(adev))
2208                                 return -EINVAL;
2209
2210                         r = amdgpu_atombios_init(adev);
2211                         if (r) {
2212                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2213                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2214                                 return r;
2215                         }
2216
2217                         /* get pf2vf msg info at its earliest time */
2218                         if (amdgpu_sriov_vf(adev))
2219                                 amdgpu_virt_init_data_exchange(adev);
2220
2221                 }
2222         }
2223
2224         adev->cg_flags &= amdgpu_cg_mask;
2225         adev->pg_flags &= amdgpu_pg_mask;
2226
2227         return 0;
2228 }
2229
2230 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2231 {
2232         int i, r;
2233
2234         for (i = 0; i < adev->num_ip_blocks; i++) {
2235                 if (!adev->ip_blocks[i].status.sw)
2236                         continue;
2237                 if (adev->ip_blocks[i].status.hw)
2238                         continue;
2239                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2240                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2241                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2242                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2243                         if (r) {
2244                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2245                                           adev->ip_blocks[i].version->funcs->name, r);
2246                                 return r;
2247                         }
2248                         adev->ip_blocks[i].status.hw = true;
2249                 }
2250         }
2251
2252         return 0;
2253 }
2254
2255 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2256 {
2257         int i, r;
2258
2259         for (i = 0; i < adev->num_ip_blocks; i++) {
2260                 if (!adev->ip_blocks[i].status.sw)
2261                         continue;
2262                 if (adev->ip_blocks[i].status.hw)
2263                         continue;
2264                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2265                 if (r) {
2266                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2267                                   adev->ip_blocks[i].version->funcs->name, r);
2268                         return r;
2269                 }
2270                 adev->ip_blocks[i].status.hw = true;
2271         }
2272
2273         return 0;
2274 }
2275
2276 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2277 {
2278         int r = 0;
2279         int i;
2280         uint32_t smu_version;
2281
2282         if (adev->asic_type >= CHIP_VEGA10) {
2283                 for (i = 0; i < adev->num_ip_blocks; i++) {
2284                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2285                                 continue;
2286
2287                         if (!adev->ip_blocks[i].status.sw)
2288                                 continue;
2289
2290                         /* no need to do the fw loading again if already done */
2291                         if (adev->ip_blocks[i].status.hw)
2292                                 break;
2293
2294                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2295                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2296                                 if (r) {
2297                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2298                                                           adev->ip_blocks[i].version->funcs->name, r);
2299                                         return r;
2300                                 }
2301                         } else {
2302                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2303                                 if (r) {
2304                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2305                                                           adev->ip_blocks[i].version->funcs->name, r);
2306                                         return r;
2307                                 }
2308                         }
2309
2310                         adev->ip_blocks[i].status.hw = true;
2311                         break;
2312                 }
2313         }
2314
2315         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2316                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2317
2318         return r;
2319 }
2320
2321 /**
2322  * amdgpu_device_ip_init - run init for hardware IPs
2323  *
2324  * @adev: amdgpu_device pointer
2325  *
2326  * Main initialization pass for hardware IPs.  The list of all the hardware
2327  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2328  * are run.  sw_init initializes the software state associated with each IP
2329  * and hw_init initializes the hardware associated with each IP.
2330  * Returns 0 on success, negative error code on failure.
2331  */
2332 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2333 {
2334         int i, r;
2335
2336         r = amdgpu_ras_init(adev);
2337         if (r)
2338                 return r;
2339
2340         for (i = 0; i < adev->num_ip_blocks; i++) {
2341                 if (!adev->ip_blocks[i].status.valid)
2342                         continue;
2343                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2344                 if (r) {
2345                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2346                                   adev->ip_blocks[i].version->funcs->name, r);
2347                         goto init_failed;
2348                 }
2349                 adev->ip_blocks[i].status.sw = true;
2350
2351                 /* need to do gmc hw init early so we can allocate gpu mem */
2352                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2353                         r = amdgpu_device_vram_scratch_init(adev);
2354                         if (r) {
2355                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2356                                 goto init_failed;
2357                         }
2358                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2359                         if (r) {
2360                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2361                                 goto init_failed;
2362                         }
2363                         r = amdgpu_device_wb_init(adev);
2364                         if (r) {
2365                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2366                                 goto init_failed;
2367                         }
2368                         adev->ip_blocks[i].status.hw = true;
2369
2370                         /* right after GMC hw init, we create CSA */
2371                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2372                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2373                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2374                                                                 AMDGPU_CSA_SIZE);
2375                                 if (r) {
2376                                         DRM_ERROR("allocate CSA failed %d\n", r);
2377                                         goto init_failed;
2378                                 }
2379                         }
2380                 }
2381         }
2382
2383         if (amdgpu_sriov_vf(adev))
2384                 amdgpu_virt_init_data_exchange(adev);
2385
2386         r = amdgpu_ib_pool_init(adev);
2387         if (r) {
2388                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2389                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2390                 goto init_failed;
2391         }
2392
2393         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2394         if (r)
2395                 goto init_failed;
2396
2397         r = amdgpu_device_ip_hw_init_phase1(adev);
2398         if (r)
2399                 goto init_failed;
2400
2401         r = amdgpu_device_fw_loading(adev);
2402         if (r)
2403                 goto init_failed;
2404
2405         r = amdgpu_device_ip_hw_init_phase2(adev);
2406         if (r)
2407                 goto init_failed;
2408
2409         /*
2410          * Retired pages will be loaded from eeprom and reserved here.
2411          * This should be called after amdgpu_device_ip_hw_init_phase2, since
2412          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2413          * functional for I2C communication, which is only true at this point.
2414          *
2415          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2416          * failures caused by a bad gpu situation, and stops the amdgpu init
2417          * process accordingly. For other failure cases, it still releases all
2418          * the resources and prints an error message, rather than returning a
2419          * negative value to the upper level.
2420          *
2421          * Note: theoretically, this should be called before all vram allocations
2422          * to protect retired pages from being abused.
2423          */
2424         r = amdgpu_ras_recovery_init(adev);
2425         if (r)
2426                 goto init_failed;
2427
2428         if (adev->gmc.xgmi.num_physical_nodes > 1)
2429                 amdgpu_xgmi_add_device(adev);
2430
2431         /* Don't init kfd if the whole hive needs to be reset during init */
2432         if (!adev->gmc.xgmi.pending_reset)
2433                 amdgpu_amdkfd_device_init(adev);
2434
2435         amdgpu_fru_get_product_info(adev);
2436
2437 init_failed:
2438         if (amdgpu_sriov_vf(adev))
2439                 amdgpu_virt_release_full_gpu(adev, true);
2440
2441         return r;
2442 }
2443
2444 /**
2445  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2446  *
2447  * @adev: amdgpu_device pointer
2448  *
2449  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2450  * this function before a GPU reset.  If the value is retained after a
2451  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2452  */
2453 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2454 {
2455         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2456 }
2457
2458 /**
2459  * amdgpu_device_check_vram_lost - check if vram is valid
2460  *
2461  * @adev: amdgpu_device pointer
2462  *
2463  * Checks the reset magic value written to the gart pointer in VRAM.
2464  * The driver calls this after a GPU reset to see if the contents of
2465  * VRAM have been lost or not.
2466  * returns true if vram is lost, false if not.
2467  */
2468 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2469 {
2470         if (memcmp(adev->gart.ptr, adev->reset_magic,
2471                         AMDGPU_RESET_MAGIC_NUM))
2472                 return true;
2473
2474         if (!amdgpu_in_reset(adev))
2475                 return false;
2476
2477         /*
2478          * For all ASICs with baco/mode1 reset, the VRAM is
2479          * always assumed to be lost.
2480          */
2481         switch (amdgpu_asic_reset_method(adev)) {
2482         case AMD_RESET_METHOD_BACO:
2483         case AMD_RESET_METHOD_MODE1:
2484                 return true;
2485         default:
2486                 return false;
2487         }
2488 }
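
/*
 * Illustrative sketch of how the two reset-magic helpers above pair up
 * around a GPU reset (simplified from the actual reset path):
 *
 *     amdgpu_device_fill_reset_magic(adev);       // before the reset
 *     ...perform the asic reset...
 *     if (amdgpu_device_check_vram_lost(adev))
 *             ; /* re-upload VRAM contents (firmware, page tables, ...) */
 */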
2489
2490 /**
2491  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2492  *
2493  * @adev: amdgpu_device pointer
2494  * @state: clockgating state (gate or ungate)
2495  *
2496  * The list of all the hardware IPs that make up the asic is walked and the
2497  * set_clockgating_state callbacks are run.
2498  * During the late init pass, this enables clockgating for the hardware IPs;
2499  * during fini or suspend, it disables clockgating.
2500  * Returns 0 on success, negative error code on failure.
2501  */
2503 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2504                                enum amd_clockgating_state state)
2505 {
2506         int i, j, r;
2507
2508         if (amdgpu_emu_mode == 1)
2509                 return 0;
2510
2511         for (j = 0; j < adev->num_ip_blocks; j++) {
2512                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2513                 if (!adev->ip_blocks[i].status.late_initialized)
2514                         continue;
2515                 /* skip CG for GFX on S0ix */
2516                 if (adev->in_s0ix &&
2517                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2518                         continue;
2519                 /* skip CG for VCE/UVD, it's handled specially */
2520                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2521                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2522                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2523                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2524                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2525                         /* enable clockgating to save power */
2526                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2527                                                                                      state);
2528                         if (r) {
2529                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2530                                           adev->ip_blocks[i].version->funcs->name, r);
2531                                 return r;
2532                         }
2533                 }
2534         }
2535
2536         return 0;
2537 }
2538
2539 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2540                                enum amd_powergating_state state)
2541 {
2542         int i, j, r;
2543
2544         if (amdgpu_emu_mode == 1)
2545                 return 0;
2546
2547         for (j = 0; j < adev->num_ip_blocks; j++) {
2548                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2549                 if (!adev->ip_blocks[i].status.late_initialized)
2550                         continue;
2551                 /* skip PG for GFX on S0ix */
2552                 if (adev->in_s0ix &&
2553                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2554                         continue;
2555                 /* skip PG for VCE/UVD, it's handled specially */
2556                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2557                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2558                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2559                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2560                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2561                         /* enable powergating to save power */
2562                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2563                                                                                         state);
2564                         if (r) {
2565                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2566                                           adev->ip_blocks[i].version->funcs->name, r);
2567                                 return r;
2568                         }
2569                 }
2570         }
2571         return 0;
2572 }
2573
2574 static int amdgpu_device_enable_mgpu_fan_boost(void)
2575 {
2576         struct amdgpu_gpu_instance *gpu_ins;
2577         struct amdgpu_device *adev;
2578         int i, ret = 0;
2579
2580         mutex_lock(&mgpu_info.mutex);
2581
2582         /*
2583          * MGPU fan boost feature should be enabled
2584          * only when there are two or more dGPUs in
2585          * the system
2586          */
2587         if (mgpu_info.num_dgpu < 2)
2588                 goto out;
2589
2590         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2591                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2592                 adev = gpu_ins->adev;
2593                 if (!(adev->flags & AMD_IS_APU) &&
2594                     !gpu_ins->mgpu_fan_enabled) {
2595                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2596                         if (ret)
2597                                 break;
2598
2599                         gpu_ins->mgpu_fan_enabled = 1;
2600                 }
2601         }
2602
2603 out:
2604         mutex_unlock(&mgpu_info.mutex);
2605
2606         return ret;
2607 }
2608
2609 /**
2610  * amdgpu_device_ip_late_init - run late init for hardware IPs
2611  *
2612  * @adev: amdgpu_device pointer
2613  *
2614  * Late initialization pass for hardware IPs.  The list of all the hardware
2615  * IPs that make up the asic is walked and the late_init callbacks are run.
2616  * late_init covers any special initialization that an IP requires
2617  * after all of them have been initialized or something that needs to happen
2618  * late in the init process.
2619  * Returns 0 on success, negative error code on failure.
2620  */
2621 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2622 {
2623         struct amdgpu_gpu_instance *gpu_instance;
2624         int i = 0, r;
2625
2626         for (i = 0; i < adev->num_ip_blocks; i++) {
2627                 if (!adev->ip_blocks[i].status.hw)
2628                         continue;
2629                 if (adev->ip_blocks[i].version->funcs->late_init) {
2630                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2631                         if (r) {
2632                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2633                                           adev->ip_blocks[i].version->funcs->name, r);
2634                                 return r;
2635                         }
2636                 }
2637                 adev->ip_blocks[i].status.late_initialized = true;
2638         }
2639
2640         amdgpu_ras_set_error_query_ready(adev, true);
2641
2642         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2643         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2644
2645         amdgpu_device_fill_reset_magic(adev);
2646
2647         r = amdgpu_device_enable_mgpu_fan_boost();
2648         if (r)
2649                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2650
2651         /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2652         if (adev->asic_type == CHIP_ARCTURUS &&
2653             amdgpu_passthrough(adev) &&
2654             adev->gmc.xgmi.num_physical_nodes > 1)
2655                 smu_set_light_sbr(&adev->smu, true);
2656
2657         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2658                 mutex_lock(&mgpu_info.mutex);
2659
2660                 /*
2661                  * Reset the device p-state to low, as it was booted with high.
2662                  *
2663                  * This should be performed only after all devices from the same
2664                  * hive get initialized.
2665                  *
2666                  * However, the number of devices in the hive is not known in
2667                  * advance; it is counted one by one as the devices initialize.
2668                  *
2669                  * So, we wait until all XGMI-interlinked devices are initialized.
2670                  * This may add some delay, as those devices may come from
2671                  * different hives. But that should be OK.
2672                  */
2673                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2674                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2675                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2676                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2677                                         continue;
2678
2679                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2680                                                 AMDGPU_XGMI_PSTATE_MIN);
2681                                 if (r) {
2682                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2683                                         break;
2684                                 }
2685                         }
2686                 }
2687
2688                 mutex_unlock(&mgpu_info.mutex);
2689         }
2690
2691         return 0;
2692 }
2693
2694 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2695 {
2696         int i, r;
2697
2698         for (i = 0; i < adev->num_ip_blocks; i++) {
2699                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2700                         continue;
2701
2702                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2703                 if (r) {
2704                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2705                                   adev->ip_blocks[i].version->funcs->name, r);
2706                 }
2707         }
2708
2709         amdgpu_amdkfd_suspend(adev, false);
2710
2711         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2712         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2713
2714         /* need to disable SMC first */
2715         for (i = 0; i < adev->num_ip_blocks; i++) {
2716                 if (!adev->ip_blocks[i].status.hw)
2717                         continue;
2718                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2719                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2720                         /* XXX handle errors */
2721                         if (r) {
2722                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2723                                           adev->ip_blocks[i].version->funcs->name, r);
2724                         }
2725                         adev->ip_blocks[i].status.hw = false;
2726                         break;
2727                 }
2728         }
2729
2730         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2731                 if (!adev->ip_blocks[i].status.hw)
2732                         continue;
2733
2734                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2735                 /* XXX handle errors */
2736                 if (r) {
2737                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2738                                   adev->ip_blocks[i].version->funcs->name, r);
2739                 }
2740
2741                 adev->ip_blocks[i].status.hw = false;
2742         }
2743
2744         return 0;
2745 }
2746
2747 /**
2748  * amdgpu_device_ip_fini - run fini for hardware IPs
2749  *
2750  * @adev: amdgpu_device pointer
2751  *
2752  * Main teardown pass for hardware IPs.  The list of all the hardware
2753  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2754  * are run.  hw_fini tears down the hardware associated with each IP
2755  * and sw_fini tears down any software state associated with each IP.
2756  * Returns 0 on success, negative error code on failure.
2757  */
2758 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2759 {
2760         int i, r;
2761
2762         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2763                 amdgpu_virt_release_ras_err_handler_data(adev);
2764
2765         amdgpu_ras_pre_fini(adev);
2766
2767         if (adev->gmc.xgmi.num_physical_nodes > 1)
2768                 amdgpu_xgmi_remove_device(adev);
2769
2770         amdgpu_amdkfd_device_fini_sw(adev);
2771
2772         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2773                 if (!adev->ip_blocks[i].status.sw)
2774                         continue;
2775
2776                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2777                         amdgpu_ucode_free_bo(adev);
2778                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2779                         amdgpu_device_wb_fini(adev);
2780                         amdgpu_device_vram_scratch_fini(adev);
2781                         amdgpu_ib_pool_fini(adev);
2782                 }
2783
2784                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2785                 /* XXX handle errors */
2786                 if (r) {
2787                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2788                                   adev->ip_blocks[i].version->funcs->name, r);
2789                 }
2790                 adev->ip_blocks[i].status.sw = false;
2791                 adev->ip_blocks[i].status.valid = false;
2792         }
2793
2794         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2795                 if (!adev->ip_blocks[i].status.late_initialized)
2796                         continue;
2797                 if (adev->ip_blocks[i].version->funcs->late_fini)
2798                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2799                 adev->ip_blocks[i].status.late_initialized = false;
2800         }
2801
2802         amdgpu_ras_fini(adev);
2803
2804         if (amdgpu_sriov_vf(adev))
2805                 if (amdgpu_virt_release_full_gpu(adev, false))
2806                         DRM_ERROR("failed to release exclusive mode on fini\n");
2807
2808         return 0;
2809 }
2810
2811 /**
2812  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2813  *
2814  * @work: work_struct pointer.
2815  */
2816 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2817 {
2818         struct amdgpu_device *adev =
2819                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2820         int r;
2821
2822         r = amdgpu_ib_ring_tests(adev);
2823         if (r)
2824                 DRM_ERROR("ib ring test failed (%d).\n", r);
2825 }
2826
2827 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2828 {
2829         struct amdgpu_device *adev =
2830                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2831
2832         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2833         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2834
2835         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2836                 adev->gfx.gfx_off_state = true;
2837 }
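
/*
 * Illustrative sketch (simplified, not the actual control path): the
 * delayed work above is typically scheduled once the last GFXOFF blocker
 * drops its request, so the hardware only enters GFXOFF after a quiet
 * period. The 100 ms delay shown is hypothetical.
 *
 *     if (!--adev->gfx.gfx_off_req_count)
 *             schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 *                                   msecs_to_jiffies(100));
 */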
2838
2839 /**
2840  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2841  *
2842  * @adev: amdgpu_device pointer
2843  *
2844  * Main suspend function for hardware IPs (phase 1).  Phase 1 suspends only
2845  * the display IPs (DCE): clockgating is disabled and the suspend callbacks
2846  * are run, putting the hardware and software state of each display IP into
2847  * a state suitable for suspend.
2848  * Returns 0 on success, negative error code on failure.
2849  */
2850 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2851 {
2852         int i, r;
2853
2854         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2855         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2856
2857         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2858                 if (!adev->ip_blocks[i].status.valid)
2859                         continue;
2860
2861                 /* displays are handled separately */
2862                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2863                         continue;
2864
2866                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2867                 /* XXX handle errors */
2868                 if (r) {
2869                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2870                                   adev->ip_blocks[i].version->funcs->name, r);
2871                         return r;
2872                 }
2873
2874                 adev->ip_blocks[i].status.hw = false;
2875         }
2876
2877         return 0;
2878 }
2879
2880 /**
2881  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2882  *
2883  * @adev: amdgpu_device pointer
2884  *
2885  * Main suspend function for hardware IPs (phase 2).  The list of all the
2886  * hardware IPs that make up the asic is walked and the suspend callbacks
2887  * are run for every IP except the displays (handled in phase 1), putting
2888  * the hardware and software state of each IP into a state suitable for suspend.
2889  * Returns 0 on success, negative error code on failure.
2890  */
2891 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2892 {
2893         int i, r;
2894
2895         if (adev->in_s0ix)
2896                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2897
2898         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2899                 if (!adev->ip_blocks[i].status.valid)
2900                         continue;
2901                 /* displays are handled in phase1 */
2902                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2903                         continue;
2904                 /* PSP loses connection when err_event_athub occurs */
2905                 if (amdgpu_ras_intr_triggered() &&
2906                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2907                         adev->ip_blocks[i].status.hw = false;
2908                         continue;
2909                 }
2910
2911                 /* skip unnecessary suspend if we have not initialized them yet */
2912                 if (adev->gmc.xgmi.pending_reset &&
2913                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2914                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2915                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2916                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2917                         adev->ip_blocks[i].status.hw = false;
2918                         continue;
2919                 }
2920
2921                 /* skip suspend of gfx and psp for S0ix
2922                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2923                  * like at runtime. PSP is also part of the always-on hardware,
2924                  * so no need to suspend it.
2925                  */
2926                 if (adev->in_s0ix &&
2927                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2928                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2929                         continue;
2930
2931                 /* XXX handle errors */
2932                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2934                 if (r) {
2935                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2936                                   adev->ip_blocks[i].version->funcs->name, r);
2937                 }
2938                 adev->ip_blocks[i].status.hw = false;
2939                 /* handle putting the SMC in the appropriate state */
2940                 if (!amdgpu_sriov_vf(adev)) {
2941                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2942                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2943                                 if (r) {
2944                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2945                                                         adev->mp1_state, r);
2946                                         return r;
2947                                 }
2948                         }
2949                 }
2950         }
2951
2952         return 0;
2953 }
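
/*
 * Illustrative sketch, not part of the driver: the rough shape of a
 * per-IP suspend callback of the kind the phase1/phase2 walks above
 * invoke through funcs->suspend(). The body is hypothetical; the real
 * callbacks live in the individual IP files.
 *
 *   static int example_ip_suspend(void *handle)
 *   {
 *           struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *
 *           // quiesce the engine, save any state the matching resume
 *           // callback will need, mask the block's interrupts
 *           return 0; // or a negative errno to report failure
 *   }
 */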
2954
2955 /**
2956  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2957  *
2958  * @adev: amdgpu_device pointer
2959  *
2960  * Main suspend function for hardware IPs.  The list of all the hardware
2961  * IPs that make up the asic is walked, clockgating is disabled and the
2962  * suspend callbacks are run.  suspend puts the hardware and software state
2963  * in each IP into a state suitable for suspend.
2964  * Returns 0 on success, negative error code on failure.
2965  */
2966 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2967 {
2968         int r;
2969
2970         if (amdgpu_sriov_vf(adev)) {
2971                 amdgpu_virt_fini_data_exchange(adev);
2972                 amdgpu_virt_request_full_gpu(adev, false);
2973         }
2974
2975         r = amdgpu_device_ip_suspend_phase1(adev);
2976         if (r)
2977                 return r;
2978         r = amdgpu_device_ip_suspend_phase2(adev);
2979
2980         if (amdgpu_sriov_vf(adev))
2981                 amdgpu_virt_release_full_gpu(adev, false);
2982
2983         return r;
2984 }
2985
2986 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2987 {
2988         int i, r;
2989
2990         static enum amd_ip_block_type ip_order[] = {
2991                 AMD_IP_BLOCK_TYPE_GMC,
2992                 AMD_IP_BLOCK_TYPE_COMMON,
2993                 AMD_IP_BLOCK_TYPE_PSP,
2994                 AMD_IP_BLOCK_TYPE_IH,
2995         };
2996
2997         for (i = 0; i < adev->num_ip_blocks; i++) {
2998                 int j;
2999                 struct amdgpu_ip_block *block;
3000
3001                 block = &adev->ip_blocks[i];
3002                 block->status.hw = false;
3003
3004                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3006                         if (block->version->type != ip_order[j] ||
3007                                 !block->status.valid)
3008                                 continue;
3009
3010                         r = block->version->funcs->hw_init(adev);
3011                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3012                         if (r)
3013                                 return r;
3014                         block->status.hw = true;
3015                 }
3016         }
3017
3018         return 0;
3019 }
3020
3021 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3022 {
3023         int i, r;
3024
3025         static enum amd_ip_block_type ip_order[] = {
3026                 AMD_IP_BLOCK_TYPE_SMC,
3027                 AMD_IP_BLOCK_TYPE_DCE,
3028                 AMD_IP_BLOCK_TYPE_GFX,
3029                 AMD_IP_BLOCK_TYPE_SDMA,
3030                 AMD_IP_BLOCK_TYPE_UVD,
3031                 AMD_IP_BLOCK_TYPE_VCE,
3032                 AMD_IP_BLOCK_TYPE_VCN
3033         };
3034
3035         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3036                 int j;
3037                 struct amdgpu_ip_block *block;
3038
3039                 for (j = 0; j < adev->num_ip_blocks; j++) {
3040                         block = &adev->ip_blocks[j];
3041
3042                         if (block->version->type != ip_order[i] ||
3043                                 !block->status.valid ||
3044                                 block->status.hw)
3045                                 continue;
3046
3047                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3048                                 r = block->version->funcs->resume(adev);
3049                         else
3050                                 r = block->version->funcs->hw_init(adev);
3051
3052                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3053                         if (r)
3054                                 return r;
3055                         block->status.hw = true;
3056                 }
3057         }
3058
3059         return 0;
3060 }
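
/*
 * The two static ip_order[] tables above deliberately encode the
 * bring-up order needed after a VF FLR rather than the order the
 * blocks appear in adev->ip_blocks: GMC/COMMON/PSP/IH are restored
 * first so that memory access, firmware loading and interrupts work,
 * and SMC is brought back before the engine blocks (GFX/SDMA/UVD/
 * VCE/VCN) are re-initialized.
 */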
3061
3062 /**
3063  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3064  *
3065  * @adev: amdgpu_device pointer
3066  *
3067  * First resume function for hardware IPs.  The list of all the hardware
3068  * IPs that make up the asic is walked and the resume callbacks are run for
3069  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3070  * after a suspend and updates the software state as necessary.  This
3071  * function is also used for restoring the GPU after a GPU reset.
3072  * Returns 0 on success, negative error code on failure.
3073  */
3074 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3075 {
3076         int i, r;
3077
3078         for (i = 0; i < adev->num_ip_blocks; i++) {
3079                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3080                         continue;
3081                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3082                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3083                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3085                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3086                         if (r) {
3087                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3088                                           adev->ip_blocks[i].version->funcs->name, r);
3089                                 return r;
3090                         }
3091                         adev->ip_blocks[i].status.hw = true;
3092                 }
3093         }
3094
3095         return 0;
3096 }
3097
3098 /**
3099  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3100  *
3101  * @adev: amdgpu_device pointer
3102  *
3103  * Second resume function for hardware IPs.  The list of all the hardware
3104  * IPs that make up the asic is walked and the resume callbacks are run for
3105  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3106  * functional state after a suspend and updates the software state as
3107  * necessary.  This function is also used for restoring the GPU after a GPU
3108  * reset.
3109  * Returns 0 on success, negative error code on failure.
3110  */
3111 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3112 {
3113         int i, r;
3114
3115         for (i = 0; i < adev->num_ip_blocks; i++) {
3116                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3117                         continue;
3118                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3119                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3120                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3121                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3122                         continue;
3123                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3124                 if (r) {
3125                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3126                                   adev->ip_blocks[i].version->funcs->name, r);
3127                         return r;
3128                 }
3129                 adev->ip_blocks[i].status.hw = true;
3130         }
3131
3132         return 0;
3133 }
3134
3135 /**
3136  * amdgpu_device_ip_resume - run resume for hardware IPs
3137  *
3138  * @adev: amdgpu_device pointer
3139  *
3140  * Main resume function for hardware IPs.  The hardware IPs
3141  * are split into two resume functions because they are
3142  * also used in recovering from a GPU reset and some additional
3143  * steps need to be taken between them.  In this case (S3/S4) they are
3144  * run sequentially.
3145  * Returns 0 on success, negative error code on failure.
3146  */
3147 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3148 {
3149         int r;
3150
3151         r = amdgpu_device_ip_resume_phase1(adev);
3152         if (r)
3153                 return r;
3154
3155         r = amdgpu_device_fw_loading(adev);
3156         if (r)
3157                 return r;
3158
3159         r = amdgpu_device_ip_resume_phase2(adev);
3160
3161         return r;
3162 }
3163
3164 /**
3165  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3166  *
3167  * @adev: amdgpu_device pointer
3168  *
3169  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3170  */
3171 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3172 {
3173         if (amdgpu_sriov_vf(adev)) {
3174                 if (adev->is_atom_fw) {
3175                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3176                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3177                 } else {
3178                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3179                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3180                 }
3181
3182                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3183                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3184         }
3185 }
3186
3187 /**
3188  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3189  *
3190  * @asic_type: AMD asic type
3191  *
3192  * Check if there is DC (new modesetting infrastructure) support for an asic.
3193  * returns true if DC has support, false if not.
3194  */
3195 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3196 {
3197         switch (asic_type) {
3198 #if defined(CONFIG_DRM_AMD_DC)
3199 #if defined(CONFIG_DRM_AMD_DC_SI)
3200         case CHIP_TAHITI:
3201         case CHIP_PITCAIRN:
3202         case CHIP_VERDE:
3203         case CHIP_OLAND:
3204 #endif
3205         case CHIP_BONAIRE:
3206         case CHIP_KAVERI:
3207         case CHIP_KABINI:
3208         case CHIP_MULLINS:
3209                 /*
3210                  * We have systems in the wild with these ASICs that require
3211                  * LVDS and VGA support which is not supported with DC.
3212                  *
3213                  * Fallback to the non-DC driver here by default so as not to
3214                  * cause regressions.
3215                  */
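                /* amdgpu_dc is effectively tri-state: with the default of -1
                 * (auto) or an explicit 0 these chips stay on the non-DC
                 * path; only an explicit value > 0 forces DC on.
                 */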
3216                 return amdgpu_dc > 0;
3217         case CHIP_HAWAII:
3218         case CHIP_CARRIZO:
3219         case CHIP_STONEY:
3220         case CHIP_POLARIS10:
3221         case CHIP_POLARIS11:
3222         case CHIP_POLARIS12:
3223         case CHIP_VEGAM:
3224         case CHIP_TONGA:
3225         case CHIP_FIJI:
3226         case CHIP_VEGA10:
3227         case CHIP_VEGA12:
3228         case CHIP_VEGA20:
3229 #if defined(CONFIG_DRM_AMD_DC_DCN)
3230         case CHIP_RAVEN:
3231         case CHIP_NAVI10:
3232         case CHIP_NAVI14:
3233         case CHIP_NAVI12:
3234         case CHIP_RENOIR:
3235         case CHIP_SIENNA_CICHLID:
3236         case CHIP_NAVY_FLOUNDER:
3237         case CHIP_DIMGREY_CAVEFISH:
3238         case CHIP_BEIGE_GOBY:
3239         case CHIP_VANGOGH:
3240         case CHIP_YELLOW_CARP:
3241 #endif
3242                 return amdgpu_dc != 0;
3243 #endif
3244         default:
3245                 if (amdgpu_dc > 0)
3246                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3247                                          "but isn't supported by ASIC, ignoring\n");
3248                 return false;
3249         }
3250 }
3251
3252 /**
3253  * amdgpu_device_has_dc_support - check if dc is supported
3254  *
3255  * @adev: amdgpu_device pointer
3256  *
3257  * Returns true for supported, false for not supported
3258  */
3259 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3260 {
3261         if (amdgpu_sriov_vf(adev) ||
3262             adev->enable_virtual_display ||
3263             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3264                 return false;
3265
3266         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3267 }
3268
3269 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3270 {
3271         struct amdgpu_device *adev =
3272                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3273         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3274
3275         /* It's a bug to not have a hive within this function */
3276         if (WARN_ON(!hive))
3277                 return;
3278
3279         /*
3280          * Use task barrier to synchronize all xgmi reset works across the
3281          * hive. task_barrier_enter and task_barrier_exit will block
3282          * until all the threads running the xgmi reset works reach
3283          * those points. task_barrier_full will do both blocks.
3284          */
3285         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3287                 task_barrier_enter(&hive->tb);
3288                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3289
3290                 if (adev->asic_reset_res)
3291                         goto fail;
3292
3293                 task_barrier_exit(&hive->tb);
3294                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3295
3296                 if (adev->asic_reset_res)
3297                         goto fail;
3298
3299                 if (adev->mmhub.ras_funcs &&
3300                     adev->mmhub.ras_funcs->reset_ras_error_count)
3301                         adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3302         } else {
3304                 task_barrier_full(&hive->tb);
3305                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3306         }
3307
3308 fail:
3309         if (adev->asic_reset_res)
3310                 DRM_WARN("ASIC reset failed with error %d for drm dev %s\n",
3311                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3312         amdgpu_put_xgmi_hive(hive);
3313 }
3314
3315 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3316 {
3317         char *input = amdgpu_lockup_timeout;
3318         char *timeout_setting = NULL;
3319         int index = 0;
3320         long timeout;
3321         int ret = 0;
3322
3323         /*
3324          * By default the timeout for non-compute jobs is 10000 ms
3325          * and 60000 ms for compute jobs.
3326          * In SR-IOV or passthrough mode, the timeout for compute
3327          * jobs is 60000 ms by default.
3328          */
3329         adev->gfx_timeout = msecs_to_jiffies(10000);
3330         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3331         if (amdgpu_sriov_vf(adev))
3332                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3333                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3334         else
3335                 adev->compute_timeout = msecs_to_jiffies(60000);
3336
3337         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3338                 while ((timeout_setting = strsep(&input, ",")) &&
3339                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3340                         ret = kstrtol(timeout_setting, 0, &timeout);
3341                         if (ret)
3342                                 return ret;
3343
3344                         if (timeout == 0) {
3345                                 index++;
3346                                 continue;
3347                         } else if (timeout < 0) {
3348                                 timeout = MAX_SCHEDULE_TIMEOUT;
3349                         } else {
3350                                 timeout = msecs_to_jiffies(timeout);
3351                         }
3352
3353                         switch (index++) {
3354                         case 0:
3355                                 adev->gfx_timeout = timeout;
3356                                 break;
3357                         case 1:
3358                                 adev->compute_timeout = timeout;
3359                                 break;
3360                         case 2:
3361                                 adev->sdma_timeout = timeout;
3362                                 break;
3363                         case 3:
3364                                 adev->video_timeout = timeout;
3365                                 break;
3366                         default:
3367                                 break;
3368                         }
3369                 }
3370                 /*
3371                  * There is only one value specified and
3372                  * it should apply to all non-compute jobs.
3373                  */
3374                 if (index == 1) {
3375                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3376                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3377                                 adev->compute_timeout = adev->gfx_timeout;
3378                 }
3379         }
3380
3381         return ret;
3382 }
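
/*
 * Worked example of the parsing above (illustrative): booting with
 *
 *   amdgpu.lockup_timeout=10000,20000,30000,40000
 *
 * yields gfx=10000 ms, compute=20000 ms, sdma=30000 ms and
 * video=40000 ms. A single value applies to all non-compute queues
 * (and to compute as well under SR-IOV/passthrough), a 0 keeps the
 * default for that position, and a negative value selects an
 * infinite (MAX_SCHEDULE_TIMEOUT) timeout.
 */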
3383
3384 static const struct attribute *amdgpu_dev_attributes[] = {
3385         &dev_attr_product_name.attr,
3386         &dev_attr_product_number.attr,
3387         &dev_attr_serial_number.attr,
3388         &dev_attr_pcie_replay_count.attr,
3389         NULL
3390 };
3391
3392 /**
3393  * amdgpu_device_init - initialize the driver
3394  *
3395  * @adev: amdgpu_device pointer
3396  * @flags: driver flags
3397  *
3398  * Initializes the driver info and hw (all asics).
3399  * Returns 0 for success or an error on failure.
3400  * Called at driver startup.
3401  */
3402 int amdgpu_device_init(struct amdgpu_device *adev,
3403                        uint32_t flags)
3404 {
3405         struct drm_device *ddev = adev_to_drm(adev);
3406         struct pci_dev *pdev = adev->pdev;
3407         int r, i;
3408         bool px = false;
3409         u32 max_MBps;
3410
3411         adev->shutdown = false;
3412         adev->flags = flags;
3413
3414         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3415                 adev->asic_type = amdgpu_force_asic_type;
3416         else
3417                 adev->asic_type = flags & AMD_ASIC_MASK;
3418
3419         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3420         if (amdgpu_emu_mode == 1)
3421                 adev->usec_timeout *= 10;
3422         adev->gmc.gart_size = 512 * 1024 * 1024;
3423         adev->accel_working = false;
3424         adev->num_rings = 0;
3425         adev->mman.buffer_funcs = NULL;
3426         adev->mman.buffer_funcs_ring = NULL;
3427         adev->vm_manager.vm_pte_funcs = NULL;
3428         adev->vm_manager.vm_pte_num_scheds = 0;
3429         adev->gmc.gmc_funcs = NULL;
3430         adev->harvest_ip_mask = 0x0;
3431         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3432         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3433
3434         adev->smc_rreg = &amdgpu_invalid_rreg;
3435         adev->smc_wreg = &amdgpu_invalid_wreg;
3436         adev->pcie_rreg = &amdgpu_invalid_rreg;
3437         adev->pcie_wreg = &amdgpu_invalid_wreg;
3438         adev->pciep_rreg = &amdgpu_invalid_rreg;
3439         adev->pciep_wreg = &amdgpu_invalid_wreg;
3440         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3441         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3442         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3443         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3444         adev->didt_rreg = &amdgpu_invalid_rreg;
3445         adev->didt_wreg = &amdgpu_invalid_wreg;
3446         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3447         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3448         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3449         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3450
3451         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3452                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3453                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3454
3455         /* mutex initialization is all done here so we
3456          * can call these functions afterwards without locking issues */
3457         mutex_init(&adev->firmware.mutex);
3458         mutex_init(&adev->pm.mutex);
3459         mutex_init(&adev->gfx.gpu_clock_mutex);
3460         mutex_init(&adev->srbm_mutex);
3461         mutex_init(&adev->gfx.pipe_reserve_mutex);
3462         mutex_init(&adev->gfx.gfx_off_mutex);
3463         mutex_init(&adev->grbm_idx_mutex);
3464         mutex_init(&adev->mn_lock);
3465         mutex_init(&adev->virt.vf_errors.lock);
3466         hash_init(adev->mn_hash);
3467         atomic_set(&adev->in_gpu_reset, 0);
3468         init_rwsem(&adev->reset_sem);
3469         mutex_init(&adev->psp.mutex);
3470         mutex_init(&adev->notifier_lock);
3471
3472         r = amdgpu_device_init_apu_flags(adev);
3473         if (r)
3474                 return r;
3475
3476         r = amdgpu_device_check_arguments(adev);
3477         if (r)
3478                 return r;
3479
3480         spin_lock_init(&adev->mmio_idx_lock);
3481         spin_lock_init(&adev->smc_idx_lock);
3482         spin_lock_init(&adev->pcie_idx_lock);
3483         spin_lock_init(&adev->uvd_ctx_idx_lock);
3484         spin_lock_init(&adev->didt_idx_lock);
3485         spin_lock_init(&adev->gc_cac_idx_lock);
3486         spin_lock_init(&adev->se_cac_idx_lock);
3487         spin_lock_init(&adev->audio_endpt_idx_lock);
3488         spin_lock_init(&adev->mm_stats.lock);
3489
3490         INIT_LIST_HEAD(&adev->shadow_list);
3491         mutex_init(&adev->shadow_list_lock);
3492
3493         INIT_LIST_HEAD(&adev->reset_list);
3494
3495         INIT_DELAYED_WORK(&adev->delayed_init_work,
3496                           amdgpu_device_delayed_init_work_handler);
3497         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3498                           amdgpu_device_delay_enable_gfx_off);
3499
3500         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3501
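        /* The request count starting at 1 keeps GFXOFF disallowed; only
         * when the count is later dropped back to 0 (via
         * amdgpu_gfx_off_ctrl) can GFXOFF actually be entered.
         */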
3502         adev->gfx.gfx_off_req_count = 1;
3503         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3504
3505         atomic_set(&adev->throttling_logging_enabled, 1);
3506         /*
3507          * If throttling continues, logging will be performed every minute
3508          * to avoid log flooding. "-1" is subtracted since the thermal
3509          * throttling interrupt comes every second. Thus, the total logging
3510          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3511          * for throttling interrupt) = 60 seconds.
3512          */
3513         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3514         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3515
3516         /* Registers mapping */
3517         /* TODO: block userspace mapping of io register */
3518         if (adev->asic_type >= CHIP_BONAIRE) {
3519                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3520                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3521         } else {
3522                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3523                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3524         }
3525
3526         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3527         if (!adev->rmmio)
3528                 return -ENOMEM;
3530         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3531         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3532
3533         /* enable PCIE atomic ops */
3534         r = pci_enable_atomic_ops_to_root(adev->pdev,
3535                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3536                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3537         if (r) {
3538                 adev->have_atomics_support = false;
3539                 DRM_INFO("PCIe atomic ops are not supported\n");
3540         } else {
3541                 adev->have_atomics_support = true;
3542         }
3543
3544         amdgpu_device_get_pcie_info(adev);
3545
3546         if (amdgpu_mcbp)
3547                 DRM_INFO("MCBP is enabled\n");
3548
3549         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3550                 adev->enable_mes = true;
3551
3552         /* detect hw virtualization here */
3553         amdgpu_detect_virtualization(adev);
3554
3555         r = amdgpu_device_get_job_timeout_settings(adev);
3556         if (r) {
3557                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3558                 return r;
3559         }
3560
3561         /* early init functions */
3562         r = amdgpu_device_ip_early_init(adev);
3563         if (r)
3564                 return r;
3565
3566         /* doorbell bar mapping and doorbell index init */
3567         amdgpu_device_doorbell_init(adev);
3568
3569         if (amdgpu_emu_mode == 1) {
3570                 /* post the asic on emulation mode */
3571                 emu_soc_asic_init(adev);
3572                 goto fence_driver_init;
3573         }
3574
3575         amdgpu_reset_init(adev);
3576
3577         /* detect if we have an SR-IOV vBIOS */
3578         amdgpu_device_detect_sriov_bios(adev);
3579
3580         /* check if we need to reset the asic
3581          *  E.g., driver was not cleanly unloaded previously, etc.
3582          */
3583         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3584                 if (adev->gmc.xgmi.num_physical_nodes) {
3585                         dev_info(adev->dev, "Pending hive reset.\n");
3586                         adev->gmc.xgmi.pending_reset = true;
3587                         /* Only init the blocks the SMU needs to handle the reset */
3588                         for (i = 0; i < adev->num_ip_blocks; i++) {
3589                                 if (!adev->ip_blocks[i].status.valid)
3590                                         continue;
3591                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3592                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3593                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3594                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3595                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3596                                                 adev->ip_blocks[i].version->funcs->name);
3597                                         adev->ip_blocks[i].status.hw = true;
3598                                 }
3599                         }
3600                 } else {
3601                         r = amdgpu_asic_reset(adev);
3602                         if (r) {
3603                                 dev_err(adev->dev, "asic reset on init failed\n");
3604                                 goto failed;
3605                         }
3606                 }
3607         }
3608
3609         pci_enable_pcie_error_reporting(adev->pdev);
3610
3611         /* Post card if necessary */
3612         if (amdgpu_device_need_post(adev)) {
3613                 if (!adev->bios) {
3614                         dev_err(adev->dev, "no vBIOS found\n");
3615                         r = -EINVAL;
3616                         goto failed;
3617                 }
3618                 DRM_INFO("GPU posting now...\n");
3619                 r = amdgpu_device_asic_init(adev);
3620                 if (r) {
3621                         dev_err(adev->dev, "gpu post error!\n");
3622                         goto failed;
3623                 }
3624         }
3625
3626         if (adev->is_atom_fw) {
3627                 /* Initialize clocks */
3628                 r = amdgpu_atomfirmware_get_clock_info(adev);
3629                 if (r) {
3630                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3631                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3632                         goto failed;
3633                 }
3634         } else {
3635                 /* Initialize clocks */
3636                 r = amdgpu_atombios_get_clock_info(adev);
3637                 if (r) {
3638                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3639                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3640                         goto failed;
3641                 }
3642                 /* init i2c buses */
3643                 if (!amdgpu_device_has_dc_support(adev))
3644                         amdgpu_atombios_i2c_init(adev);
3645         }
3646
3647 fence_driver_init:
3648         /* Fence driver */
3649         r = amdgpu_fence_driver_sw_init(adev);
3650         if (r) {
3651                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3652                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3653                 goto failed;
3654         }
3655
3656         /* init the mode config */
3657         drm_mode_config_init(adev_to_drm(adev));
3658
3659         r = amdgpu_device_ip_init(adev);
3660         if (r) {
3661                 /* failed in exclusive mode due to timeout */
3662                 if (amdgpu_sriov_vf(adev) &&
3663                     !amdgpu_sriov_runtime(adev) &&
3664                     amdgpu_virt_mmio_blocked(adev) &&
3665                     !amdgpu_virt_wait_reset(adev)) {
3666                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3667                         /* Don't send request since VF is inactive. */
3668                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3669                         adev->virt.ops = NULL;
3670                         r = -EAGAIN;
3671                         goto release_ras_con;
3672                 }
3673                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3674                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3675                 goto release_ras_con;
3676         }
3677
3678         amdgpu_fence_driver_hw_init(adev);
3679
3680         dev_info(adev->dev,
3681                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3682                         adev->gfx.config.max_shader_engines,
3683                         adev->gfx.config.max_sh_per_se,
3684                         adev->gfx.config.max_cu_per_sh,
3685                         adev->gfx.cu_info.number);
3686
3687         adev->accel_working = true;
3688
3689         amdgpu_vm_check_compute_bug(adev);
3690
3691         /* Initialize the buffer migration limit. */
3692         if (amdgpu_moverate >= 0)
3693                 max_MBps = amdgpu_moverate;
3694         else
3695                 max_MBps = 8; /* Allow 8 MB/s. */
3696         /* Get a log2 for easy divisions. */
3697         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
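        /* With the log2 form, consumers can estimate a transfer time
         * without a division, e.g. (illustrative):
         *   seconds ~= (bytes >> 20) >> log2_max_MBps
         * since bytes >> 20 is MiB and MiB / (MiB/s) gives seconds.
         */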
3698
3699         amdgpu_fbdev_init(adev);
3700
3701         r = amdgpu_pm_sysfs_init(adev);
3702         if (r) {
3703                 adev->pm_sysfs_en = false;
3704                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3705         } else
3706                 adev->pm_sysfs_en = true;
3707
3708         r = amdgpu_ucode_sysfs_init(adev);
3709         if (r) {
3710                 adev->ucode_sysfs_en = false;
3711                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3712         } else
3713                 adev->ucode_sysfs_en = true;
3714
3715         if (amdgpu_testing & 1) {
3716                 if (adev->accel_working)
3717                         amdgpu_test_moves(adev);
3718                 else
3719                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3720         }
3721         if (amdgpu_benchmarking) {
3722                 if (adev->accel_working)
3723                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3724                 else
3725                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3726         }
3727
3728         /*
3729          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3730          * Otherwise the mgpu fan boost feature will be skipped because the
3731          * gpu instance count would be too low.
3732          */
3733         amdgpu_register_gpu_instance(adev);
3734
3735         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3736          * explicit gating rather than handling it automatically.
3737          */
3738         if (!adev->gmc.xgmi.pending_reset) {
3739                 r = amdgpu_device_ip_late_init(adev);
3740                 if (r) {
3741                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3742                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3743                         goto release_ras_con;
3744                 }
3745                 /* must succeed. */
3746                 amdgpu_ras_resume(adev);
3747                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3748                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3749         }
3750
3751         if (amdgpu_sriov_vf(adev))
3752                 flush_delayed_work(&adev->delayed_init_work);
3753
3754         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3755         if (r)
3756                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3757
3758         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3759                 r = amdgpu_pmu_init(adev);
3760                 if (r)
3761                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3762         }
3762
3763         /* Keep stored PCI config space at hand for restore after a sudden PCI error */
3764         if (amdgpu_device_cache_pci_state(adev->pdev))
3765                 pci_restore_state(pdev);
3766
3767         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3768         /* this will fail for cards that aren't VGA class devices, just
3769          * ignore it */
3770         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3771                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3772
3773         if (amdgpu_device_supports_px(ddev)) {
3774                 px = true;
3775                 vga_switcheroo_register_client(adev->pdev,
3776                                                &amdgpu_switcheroo_ops, px);
3777                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3778         }
3779
3780         if (adev->gmc.xgmi.pending_reset)
3781                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3782                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3783
3784         return 0;
3785
3786 release_ras_con:
3787         amdgpu_release_ras_context(adev);
3788
3789 failed:
3790         amdgpu_vf_error_trans_all(adev);
3791
3792         return r;
3793 }
3794
3795 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3796 {
3797         /* Clear all CPU mappings pointing to this device */
3798         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3799
3800         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3801         amdgpu_device_doorbell_fini(adev);
3802
3803         iounmap(adev->rmmio);
3804         adev->rmmio = NULL;
3805         if (adev->mman.aper_base_kaddr)
3806                 iounmap(adev->mman.aper_base_kaddr);
3807         adev->mman.aper_base_kaddr = NULL;
3808
3809         /* Memory manager related */
3810         if (!adev->gmc.xgmi.connected_to_cpu) {
3811                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3812                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3813         }
3814 }
3815
3816 /**
3817  * amdgpu_device_fini_hw - tear down the driver (hardware teardown phase)
3818  *
3819  * @adev: amdgpu_device pointer
3820  *
3821  * Tear down the driver info (all asics).
3822  * Called at driver shutdown.
3823  */
3824 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3825 {
3826         dev_info(adev->dev, "amdgpu: finishing device.\n");
3827         flush_delayed_work(&adev->delayed_init_work);
3828         if (adev->mman.initialized) {
3829                 flush_delayed_work(&adev->mman.bdev.wq);
3830                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3831         }
3832         adev->shutdown = true;
3833
3834         /* make sure the IB tests have finished before entering exclusive mode
3835          * to avoid preemption during an IB test
3836          */
3837         if (amdgpu_sriov_vf(adev)) {
3838                 amdgpu_virt_request_full_gpu(adev, false);
3839                 amdgpu_virt_fini_data_exchange(adev);
3840         }
3841
3842         /* disable all interrupts */
3843         amdgpu_irq_disable_all(adev);
3844         if (adev->mode_info.mode_config_initialized) {
3845                 if (!amdgpu_device_has_dc_support(adev))
3846                         drm_helper_force_disable_all(adev_to_drm(adev));
3847                 else
3848                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3849         }
3850         amdgpu_fence_driver_hw_fini(adev);
3851
3852         if (adev->pm_sysfs_en)
3853                 amdgpu_pm_sysfs_fini(adev);
3854         if (adev->ucode_sysfs_en)
3855                 amdgpu_ucode_sysfs_fini(adev);
3856         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3857
3858         amdgpu_fbdev_fini(adev);
3859
3860         amdgpu_irq_fini_hw(adev);
3861
3862         amdgpu_device_ip_fini_early(adev);
3863
3864         amdgpu_gart_dummy_page_fini(adev);
3865
3866         amdgpu_device_unmap_mmio(adev);
3867 }
3868
3869 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3870 {
3871         amdgpu_device_ip_fini(adev);
3872         amdgpu_fence_driver_sw_fini(adev);
3873         release_firmware(adev->firmware.gpu_info_fw);
3874         adev->firmware.gpu_info_fw = NULL;
3875         adev->accel_working = false;
3876
3877         amdgpu_reset_fini(adev);
3878
3879         /* free i2c buses */
3880         if (!amdgpu_device_has_dc_support(adev))
3881                 amdgpu_i2c_fini(adev);
3882
3883         if (amdgpu_emu_mode != 1)
3884                 amdgpu_atombios_fini(adev);
3885
3886         kfree(adev->bios);
3887         adev->bios = NULL;
3888         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3889                 vga_switcheroo_unregister_client(adev->pdev);
3890                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3891         }
3892         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3893                 vga_client_unregister(adev->pdev);
3894
3895         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3896                 amdgpu_pmu_fini(adev);
3897         if (adev->mman.discovery_bin)
3898                 amdgpu_discovery_fini(adev);
3899
3900         kfree(adev->pci_state);
3902 }
3903
3905 /*
3906  * Suspend & resume.
3907  */
3908 /**
3909  * amdgpu_device_suspend - initiate device suspend
3910  *
3911  * @dev: drm dev pointer
3912  * @fbcon: notify the fbdev of suspend
3913  *
3914  * Puts the hw in the suspend state (all asics).
3915  * Returns 0 for success or an error on failure.
3916  * Called at driver suspend.
3917  */
3918 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3919 {
3920         struct amdgpu_device *adev = drm_to_adev(dev);
3921
3922         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3923                 return 0;
3924
3925         adev->in_suspend = true;
3926
3927         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3928                 DRM_WARN("smart shift update failed\n");
3929
3930         drm_kms_helper_poll_disable(dev);
3931
3932         if (fbcon)
3933                 amdgpu_fbdev_set_suspend(adev, 1);
3934
3935         cancel_delayed_work_sync(&adev->delayed_init_work);
3936
3937         amdgpu_ras_suspend(adev);
3938
3939         amdgpu_device_ip_suspend_phase1(adev);
3940
3941         if (!adev->in_s0ix)
3942                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3943
3944         /* evict vram memory */
3945         amdgpu_bo_evict_vram(adev);
3946
3947         amdgpu_fence_driver_hw_fini(adev);
3948
3949         amdgpu_device_ip_suspend_phase2(adev);
3950         /* evict remaining vram memory
3951          * This second call to evict vram is to evict the gart page table
3952          * using the CPU.
3953          */
3954         amdgpu_bo_evict_vram(adev);
3955
3956         return 0;
3957 }
3958
3959 /**
3960  * amdgpu_device_resume - initiate device resume
3961  *
3962  * @dev: drm dev pointer
3963  * @fbcon: notify the fbdev of resume
3964  *
3965  * Bring the hw back to operating state (all asics).
3966  * Returns 0 for success or an error on failure.
3967  * Called at driver resume.
3968  */
3969 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3970 {
3971         struct amdgpu_device *adev = drm_to_adev(dev);
3972         int r = 0;
3973
3974         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3975                 return 0;
3976
3977         if (adev->in_s0ix)
3978                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3979
3980         /* post card */
3981         if (amdgpu_device_need_post(adev)) {
3982                 r = amdgpu_device_asic_init(adev);
3983                 if (r)
3984                         dev_err(adev->dev, "amdgpu asic init failed\n");
3985         }
3986
3987         r = amdgpu_device_ip_resume(adev);
3988         if (r) {
3989                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3990                 return r;
3991         }
3992         amdgpu_fence_driver_hw_init(adev);
3993
3994         r = amdgpu_device_ip_late_init(adev);
3995         if (r)
3996                 return r;
3997
3998         queue_delayed_work(system_wq, &adev->delayed_init_work,
3999                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4000
4001         if (!adev->in_s0ix) {
4002                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4003                 if (r)
4004                         return r;
4005         }
4006
4007         /* Make sure IB tests flushed */
4008         flush_delayed_work(&adev->delayed_init_work);
4009
4010         if (fbcon)
4011                 amdgpu_fbdev_set_suspend(adev, 0);
4012
4013         drm_kms_helper_poll_enable(dev);
4014
4015         amdgpu_ras_resume(adev);
4016
4017         /*
4018          * Most of the connector probing functions try to acquire runtime pm
4019          * refs to ensure that the GPU is powered on when connector polling is
4020          * performed. Since we're calling this from a runtime PM callback,
4021          * trying to acquire rpm refs will cause us to deadlock.
4022          *
4023          * Since we're guaranteed to be holding the rpm lock, it's safe to
4024          * temporarily disable the rpm helpers so this doesn't deadlock us.
4025          */
4026 #ifdef CONFIG_PM
4027         dev->dev->power.disable_depth++;
4028 #endif
4029         if (!amdgpu_device_has_dc_support(adev))
4030                 drm_helper_hpd_irq_event(dev);
4031         else
4032                 drm_kms_helper_hotplug_event(dev);
4033 #ifdef CONFIG_PM
4034         dev->dev->power.disable_depth--;
4035 #endif
4036         adev->in_suspend = false;
4037
4038         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4039                 DRM_WARN("smart shift update failed\n");
4040
4041         return 0;
4042 }
4043
4044 /**
4045  * amdgpu_device_ip_check_soft_reset - check if the asic is hung
4046  *
4047  * @adev: amdgpu_device pointer
4048  *
4049  * The list of all the hardware IPs that make up the asic is walked and
4050  * the check_soft_reset callbacks are run.  check_soft_reset determines
4051  * if the asic is still hung or not.
4052  * Returns true if any of the IPs are still in a hung state, false if not.
4053  */
4054 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4055 {
4056         int i;
4057         bool asic_hang = false;
4058
4059         if (amdgpu_sriov_vf(adev))
4060                 return true;
4061
4062         if (amdgpu_asic_need_full_reset(adev))
4063                 return true;
4064
4065         for (i = 0; i < adev->num_ip_blocks; i++) {
4066                 if (!adev->ip_blocks[i].status.valid)
4067                         continue;
4068                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4069                         adev->ip_blocks[i].status.hang =
4070                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4071                 if (adev->ip_blocks[i].status.hang) {
4072                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4073                         asic_hang = true;
4074                 }
4075         }
4076         return asic_hang;
4077 }
4078
4079 /**
4080  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4081  *
4082  * @adev: amdgpu_device pointer
4083  *
4084  * The list of all the hardware IPs that make up the asic is walked and the
4085  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4086  * handles any IP specific hardware or software state changes that are
4087  * necessary for a soft reset to succeed.
4088  * Returns 0 on success, negative error code on failure.
4089  */
4090 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4091 {
4092         int i, r = 0;
4093
4094         for (i = 0; i < adev->num_ip_blocks; i++) {
4095                 if (!adev->ip_blocks[i].status.valid)
4096                         continue;
4097                 if (adev->ip_blocks[i].status.hang &&
4098                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4099                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4100                         if (r)
4101                                 return r;
4102                 }
4103         }
4104
4105         return 0;
4106 }
4107
4108 /**
4109  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4110  *
4111  * @adev: amdgpu_device pointer
4112  *
4113  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4114  * reset is necessary to recover.
4115  * Returns true if a full asic reset is required, false if not.
4116  */
4117 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4118 {
4119         int i;
4120
4121         if (amdgpu_asic_need_full_reset(adev))
4122                 return true;
4123
4124         for (i = 0; i < adev->num_ip_blocks; i++) {
4125                 if (!adev->ip_blocks[i].status.valid)
4126                         continue;
4127                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4128                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4129                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4130                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4131                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4132                         if (adev->ip_blocks[i].status.hang) {
4133                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4134                                 return true;
4135                         }
4136                 }
4137         }
4138         return false;
4139 }
4140
4141 /**
4142  * amdgpu_device_ip_soft_reset - do a soft reset
4143  *
4144  * @adev: amdgpu_device pointer
4145  *
4146  * The list of all the hardware IPs that make up the asic is walked and the
4147  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4148  * IP specific hardware or software state changes that are necessary to soft
4149  * reset the IP.
4150  * Returns 0 on success, negative error code on failure.
4151  */
4152 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4153 {
4154         int i, r = 0;
4155
4156         for (i = 0; i < adev->num_ip_blocks; i++) {
4157                 if (!adev->ip_blocks[i].status.valid)
4158                         continue;
4159                 if (adev->ip_blocks[i].status.hang &&
4160                     adev->ip_blocks[i].version->funcs->soft_reset) {
4161                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4162                         if (r)
4163                                 return r;
4164                 }
4165         }
4166
4167         return 0;
4168 }
4169
4170 /**
4171  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4172  *
4173  * @adev: amdgpu_device pointer
4174  *
4175  * The list of all the hardware IPs that make up the asic is walked and the
4176  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4177  * handles any IP specific hardware or software state changes that are
4178  * necessary after the IP has been soft reset.
4179  * Returns 0 on success, negative error code on failure.
4180  */
4181 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4182 {
4183         int i, r = 0;
4184
4185         for (i = 0; i < adev->num_ip_blocks; i++) {
4186                 if (!adev->ip_blocks[i].status.valid)
4187                         continue;
4188                 if (adev->ip_blocks[i].status.hang &&
4189                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4190                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4191                 if (r)
4192                         return r;
4193         }
4194
4195         return 0;
4196 }
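
/*
 * Sketch (illustrative only) of how the four soft-reset phases above
 * compose; the real recovery path elsewhere in the driver adds
 * locking, VRAM recovery and the full-reset fallback:
 *
 *   if (amdgpu_device_ip_check_soft_reset(adev) &&
 *       !amdgpu_device_ip_need_full_reset(adev)) {
 *           r = amdgpu_device_ip_pre_soft_reset(adev);
 *           if (!r)
 *                   r = amdgpu_device_ip_soft_reset(adev);
 *           if (!r)
 *                   r = amdgpu_device_ip_post_soft_reset(adev);
 *   }
 */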
4197
4198 /**
4199  * amdgpu_device_recover_vram - Recover some VRAM contents
4200  *
4201  * @adev: amdgpu_device pointer
4202  *
4203  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4204  * restore things like GPUVM page tables after a GPU reset where
4205  * the contents of VRAM might be lost.
4206  *
4207  * Returns:
4208  * 0 on success, negative error code on failure.
4209  */
4210 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4211 {
4212         struct dma_fence *fence = NULL, *next = NULL;
4213         struct amdgpu_bo *shadow;
4214         struct amdgpu_bo_vm *vmbo;
4215         long r = 1, tmo;
4216
4217         if (amdgpu_sriov_runtime(adev))
4218                 tmo = msecs_to_jiffies(8000);
4219         else
4220                 tmo = msecs_to_jiffies(100);
4221
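
        /* The loop below pipelines the recovery: each shadow restore is
         * submitted right away and only the fence of the *previous*
         * restore is waited on, so copy submission overlaps with the wait.
         */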
4222         dev_info(adev->dev, "recover vram bo from shadow start\n");
4223         mutex_lock(&adev->shadow_list_lock);
4224         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4225                 shadow = &vmbo->bo;
4226                 /* No need to recover an evicted BO */
4227                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4228                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4229                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4230                         continue;
4231
4232                 r = amdgpu_bo_restore_shadow(shadow, &next);
4233                 if (r)
4234                         break;
4235
4236                 if (fence) {
4237                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4238                         dma_fence_put(fence);
4239                         fence = next;
4240                         if (tmo == 0) {
4241                                 r = -ETIMEDOUT;
4242                                 break;
4243                         } else if (tmo < 0) {
4244                                 r = tmo;
4245                                 break;
4246                         }
4247                 } else {
4248                         fence = next;
4249                 }
4250         }
4251         mutex_unlock(&adev->shadow_list_lock);
4252
4253         if (fence)
4254                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4255         dma_fence_put(fence);
4256
4257         if (r < 0 || tmo <= 0) {
4258                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4259                 return -EIO;
4260         }
4261
4262         dev_info(adev->dev, "recover vram bo from shadow done\n");
4263         return 0;
4264 }
4265
4266
4267 /**
4268  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4269  *
4270  * @adev: amdgpu_device pointer
4271  * @from_hypervisor: request from hypervisor
4272  *
4273  * Do a VF FLR and reinitialize the ASIC.
4274  * Returns 0 on success, negative error code on failure.
4275  */
4276 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4277                                      bool from_hypervisor)
4278 {
4279         int r;
4280
4281         if (from_hypervisor)
4282                 r = amdgpu_virt_request_full_gpu(adev, true);
4283         else
4284                 r = amdgpu_virt_reset_gpu(adev);
4285         if (r)
4286                 return r;
4287
4288         amdgpu_amdkfd_pre_reset(adev);
4289
4290         /* Resume IP prior to SMC */
4291         r = amdgpu_device_ip_reinit_early_sriov(adev);
4292         if (r)
4293                 goto error;
4294
4295         amdgpu_virt_init_data_exchange(adev);
4296         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4297         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4298
4299         r = amdgpu_device_fw_loading(adev);
4300         if (r)
4301                 return r;
4302
4303         /* now we are okay to resume SMC/CP/SDMA */
4304         r = amdgpu_device_ip_reinit_late_sriov(adev);
4305         if (r)
4306                 goto error;
4307
4308         amdgpu_irq_gpu_reset_resume_helper(adev);
4309         r = amdgpu_ib_ring_tests(adev);
4310         amdgpu_amdkfd_post_reset(adev);
4311
4312 error:
4313         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4314                 amdgpu_inc_vram_lost(adev);
4315                 r = amdgpu_device_recover_vram(adev);
4316         }
4317         amdgpu_virt_release_full_gpu(adev, true);
4318
4319         return r;
4320 }
4321
4322 /**
4323  * amdgpu_device_has_job_running - check if there is any job in the pending list
4324  *
4325  * @adev: amdgpu_device pointer
4326  *
4327  * Check if any scheduler ring still has a job in its pending list.
4328  */
4329 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4330 {
4331         int i;
4332         struct drm_sched_job *job;
4333
4334         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4335                 struct amdgpu_ring *ring = adev->rings[i];
4336
4337                 if (!ring || !ring->sched.thread)
4338                         continue;
4339
4340                 spin_lock(&ring->sched.job_list_lock);
4341                 job = list_first_entry_or_null(&ring->sched.pending_list,
4342                                                struct drm_sched_job, list);
4343                 spin_unlock(&ring->sched.job_list_lock);
4344                 if (job)
4345                         return true;
4346         }
4347         return false;
4348 }
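
/*
 * Illustrative sketch, not part of the original driver: a hypothetical caller
 * can poll amdgpu_device_has_job_running() to drain outstanding work before a
 * state transition; the 10ms poll interval is an arbitrary choice.
 */
static void amdgpu_example_drain_jobs(struct amdgpu_device *adev)
{
        while (amdgpu_device_has_job_running(adev))
                msleep(10);
}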
4349
4350 /**
4351  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4352  *
4353  * @adev: amdgpu_device pointer
4354  *
4355  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4356  * a hung GPU.
4357  */
4358 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4359 {
4360         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4361                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4362                 return false;
4363         }
4364
4365         if (amdgpu_gpu_recovery == 0)
4366                 goto disabled;
4367
4368         if (amdgpu_sriov_vf(adev))
4369                 return true;
4370
4371         if (amdgpu_gpu_recovery == -1) {
4372                 switch (adev->asic_type) {
4373                 case CHIP_BONAIRE:
4374                 case CHIP_HAWAII:
4375                 case CHIP_TOPAZ:
4376                 case CHIP_TONGA:
4377                 case CHIP_FIJI:
4378                 case CHIP_POLARIS10:
4379                 case CHIP_POLARIS11:
4380                 case CHIP_POLARIS12:
4381                 case CHIP_VEGAM:
4382                 case CHIP_VEGA20:
4383                 case CHIP_VEGA10:
4384                 case CHIP_VEGA12:
4385                 case CHIP_RAVEN:
4386                 case CHIP_ARCTURUS:
4387                 case CHIP_RENOIR:
4388                 case CHIP_NAVI10:
4389                 case CHIP_NAVI14:
4390                 case CHIP_NAVI12:
4391                 case CHIP_SIENNA_CICHLID:
4392                 case CHIP_NAVY_FLOUNDER:
4393                 case CHIP_DIMGREY_CAVEFISH:
4394                 case CHIP_BEIGE_GOBY:
4395                 case CHIP_VANGOGH:
4396                 case CHIP_ALDEBARAN:
4397                         break;
4398                 default:
4399                         goto disabled;
4400                 }
4401         }
4402
4403         return true;
4404
4405 disabled:
4406         dev_info(adev->dev, "GPU recovery disabled.\n");
4407         return false;
4408 }
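
/*
 * Illustrative sketch, not part of the original driver: per the checks above,
 * amdgpu_gpu_recovery == 0 disables recovery, -1 enables it only on the listed
 * ASICs, SR-IOV VFs always recover, and 2 additionally enables the guilty-job
 * recheck used further below. A hypothetical timeout handler would gate its
 * recovery path like this:
 */
static void amdgpu_example_handle_timeout(struct amdgpu_device *adev,
                                          struct amdgpu_job *job)
{
        if (amdgpu_device_should_recover_gpu(adev))
                amdgpu_device_gpu_recover(adev, job);
}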
4409
4410 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4411 {
4412         u32 i;
4413         int ret = 0;
4414
4415         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4416
4417         dev_info(adev->dev, "GPU mode1 reset\n");
4418
4419         /* disable BM */
4420         pci_clear_master(adev->pdev);
4421
4422         amdgpu_device_cache_pci_state(adev->pdev);
4423
4424         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4425                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4426                 ret = amdgpu_dpm_mode1_reset(adev);
4427         } else {
4428                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4429                 ret = psp_gpu_reset(adev);
4430         }
4431
4432         if (ret)
4433                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4434
4435         amdgpu_device_load_pci_state(adev->pdev);
4436
4437         /* wait for asic to come out of reset */
4438         for (i = 0; i < adev->usec_timeout; i++) {
4439                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4440
4441                 if (memsize != 0xffffffff)
4442                         break;
4443                 udelay(1);
4444         }
4445
4446         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4447         return ret;
4448 }
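
/*
 * Illustrative sketch, not part of the original driver: the poll loop above
 * relies on registers reading back as all ones while the ASIC is still in
 * reset. amdgpu_example_asic_alive() is a hypothetical helper expressing
 * that check.
 */
static bool amdgpu_example_asic_alive(struct amdgpu_device *adev)
{
        /* MC memsize reads 0xffffffff until the ASIC is out of reset */
        return adev->nbio.funcs->get_memsize(adev) != 0xffffffff;
}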
4449
4450 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4451                                  struct amdgpu_reset_context *reset_context)
4452 {
4453         int i, j, r = 0;
4454         struct amdgpu_job *job = NULL;
4455         bool need_full_reset =
4456                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4457
4458         if (reset_context->reset_req_dev == adev)
4459                 job = reset_context->job;
4460
4461         /* No need to dump if the device is not in a good state during the probe period */
4462         if (!adev->gmc.xgmi.pending_reset)
4463                 amdgpu_debugfs_wait_dump(adev);
4464
4465         if (amdgpu_sriov_vf(adev)) {
4466                 /* stop the data exchange thread */
4467                 amdgpu_virt_fini_data_exchange(adev);
4468         }
4469
4470         /* block all schedulers and reset given job's ring */
4471         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4472                 struct amdgpu_ring *ring = adev->rings[i];
4473
4474                 if (!ring || !ring->sched.thread)
4475                         continue;
4476
4477                 /* Clear the job fences from the fence driver to avoid force_completion;
4478                  * leave the NULL and VM flush fences in the fence driver. */
4479                 for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4480                         struct dma_fence *old, **ptr;
4481
4482                         ptr = &ring->fence_drv.fences[j];
4483                         old = rcu_dereference_protected(*ptr, 1);
4484                         if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags))
4485                                 RCU_INIT_POINTER(*ptr, NULL);
4487                 }
4488                 /* after all hw jobs are reset, the hw fence is meaningless, so force_completion */
4489                 amdgpu_fence_driver_force_completion(ring);
4490         }
4491
4492         if (job && job->vm)
4493                 drm_sched_increase_karma(&job->base);
4494
4495         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4496         /* If reset handler not implemented, continue; otherwise return */
4497         if (r == -ENOSYS)
4498                 r = 0;
4499         else
4500                 return r;
4501
4502         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4503         if (!amdgpu_sriov_vf(adev)) {
4504
4505                 if (!need_full_reset)
4506                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4507
4508                 if (!need_full_reset) {
4509                         amdgpu_device_ip_pre_soft_reset(adev);
4510                         r = amdgpu_device_ip_soft_reset(adev);
4511                         amdgpu_device_ip_post_soft_reset(adev);
4512                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4513                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4514                                 need_full_reset = true;
4515                         }
4516                 }
4517
4518                 if (need_full_reset) {
4519                         r = amdgpu_device_ip_suspend(adev);
4520                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4521                 } else {
4522                         clear_bit(AMDGPU_NEED_FULL_RESET,
4523                                   &reset_context->flags);
4524                 }
4525         }
4526
4527         return r;
4528 }
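
/*
 * Illustrative sketch, not part of the original driver: the -ENOSYS
 * convention above means "no ASIC-specific reset handler registered, fall
 * through to the default path". A hypothetical wrapper making that explicit:
 */
static int amdgpu_example_try_handler_first(struct amdgpu_device *adev,
                                            struct amdgpu_reset_context *ctx,
                                            bool *handled)
{
        int r = amdgpu_reset_prepare_hwcontext(adev, ctx);

        *handled = (r != -ENOSYS);      /* -ENOSYS: no handler registered */
        return *handled ? r : 0;        /* caller takes the default path */
}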
4529
4530 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4531                          struct amdgpu_reset_context *reset_context)
4532 {
4533         struct amdgpu_device *tmp_adev = NULL;
4534         bool need_full_reset, skip_hw_reset, vram_lost = false;
4535         int r = 0;
4536
4537         /* Try reset handler method first */
4538         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4539                                     reset_list);
4540         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4541         /* If reset handler not implemented, continue; otherwise return */
4542         if (r == -ENOSYS)
4543                 r = 0;
4544         else
4545                 return r;
4546
4547         /* Reset handler not implemented, use the default method */
4548         need_full_reset =
4549                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4550         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4551
4552         /*
4553          * ASIC reset has to be done on all XGMI hive nodes ASAP
4554          * to allow proper links negotiation in FW (within 1 sec)
4555          * to allow proper link negotiation in FW (within 1 sec)
4556         if (!skip_hw_reset && need_full_reset) {
4557                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4558                         /* For XGMI run all resets in parallel to speed up the process */
4559                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4560                                 tmp_adev->gmc.xgmi.pending_reset = false;
4561                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4562                                         r = -EALREADY;
4563                         } else {
4564                                 r = amdgpu_asic_reset(tmp_adev);
4565                         }
4565
4566                         if (r) {
4567                                 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4568                                          r, adev_to_drm(tmp_adev)->unique);
4569                                 break;
4570                         }
4571                 }
4572
4573                 /* For XGMI wait for all resets to complete before proceeding */
4574                 if (!r) {
4575                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4576                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4577                                         flush_work(&tmp_adev->xgmi_reset_work);
4578                                         r = tmp_adev->asic_reset_res;
4579                                         if (r)
4580                                                 break;
4581                                 }
4582                         }
4583                 }
4584         }
4585
4586         if (!r && amdgpu_ras_intr_triggered()) {
4587                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4588                         if (tmp_adev->mmhub.ras_funcs &&
4589                             tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4590                                 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4591                 }
4592
4593                 amdgpu_ras_intr_cleared();
4594         }
4595
4596         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4597                 if (need_full_reset) {
4598                         /* post card */
4599                         r = amdgpu_device_asic_init(tmp_adev);
4600                         if (r) {
4601                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4602                         } else {
4603                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4604                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4605                                 if (r)
4606                                         goto out;
4607
4608                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4609                                 if (vram_lost) {
4610                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4611                                         amdgpu_inc_vram_lost(tmp_adev);
4612                                 }
4613
4614                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4615                                 if (r)
4616                                         goto out;
4617
4618                                 r = amdgpu_device_fw_loading(tmp_adev);
4619                                 if (r)
4620                                         return r;
4621
4622                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4623                                 if (r)
4624                                         goto out;
4625
4626                                 if (vram_lost)
4627                                         amdgpu_device_fill_reset_magic(tmp_adev);
4628
4629                                 /*
4630                                  * Add this ASIC back as tracked, since the
4631                                  * reset has already completed successfully.
4632                                  */
4633                                 amdgpu_register_gpu_instance(tmp_adev);
4634
4635                                 if (!reset_context->hive &&
4636                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4637                                         amdgpu_xgmi_add_device(tmp_adev);
4638
4639                                 r = amdgpu_device_ip_late_init(tmp_adev);
4640                                 if (r)
4641                                         goto out;
4642
4643                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4644
4645                                 /*
4646                                  * The GPU enters a bad state once the number
4647                                  * of faulty pages reported by ECC reaches the
4648                                  * threshold, and RAS recovery is scheduled next.
4649                                  * So add a check here to break out of recovery
4650                                  * if the bad page threshold is indeed exceeded,
4651                                  * and remind the user to retire this GPU or set
4652                                  * a bigger bad_page_threshold value before
4653                                  * probing the driver again.
4654                                  */
4655                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4656                                         /* must succeed. */
4657                                         amdgpu_ras_resume(tmp_adev);
4658                                 } else {
4659                                         r = -EINVAL;
4660                                         goto out;
4661                                 }
4662
4663                                 /* Update PSP FW topology after reset */
4664                                 if (reset_context->hive &&
4665                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4666                                         r = amdgpu_xgmi_update_topology(
4667                                                 reset_context->hive, tmp_adev);
4668                         }
4669                 }
4670
4671 out:
4672                 if (!r) {
4673                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4674                         r = amdgpu_ib_ring_tests(tmp_adev);
4675                         if (r) {
4676                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4677                                 need_full_reset = true;
4678                                 r = -EAGAIN;
4679                                 goto end;
4680                         }
4681                 }
4682
4683                 if (!r)
4684                         r = amdgpu_device_recover_vram(tmp_adev);
4685                 else
4686                         tmp_adev->asic_reset_res = r;
4687         }
4688
4689 end:
4690         if (need_full_reset)
4691                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4692         else
4693                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4694         return r;
4695 }
4696
4697 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4698                                 struct amdgpu_hive_info *hive)
4699 {
4700         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4701                 return false;
4702
4703         if (hive) {
4704                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4705         } else {
4706                 down_write(&adev->reset_sem);
4707         }
4708
4709         switch (amdgpu_asic_reset_method(adev)) {
4710         case AMD_RESET_METHOD_MODE1:
4711                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4712                 break;
4713         case AMD_RESET_METHOD_MODE2:
4714                 adev->mp1_state = PP_MP1_STATE_RESET;
4715                 break;
4716         default:
4717                 adev->mp1_state = PP_MP1_STATE_NONE;
4718                 break;
4719         }
4720
4721         return true;
4722 }
4723
4724 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4725 {
4726         amdgpu_vf_error_trans_all(adev);
4727         adev->mp1_state = PP_MP1_STATE_NONE;
4728         atomic_set(&adev->in_gpu_reset, 0);
4729         up_write(&adev->reset_sem);
4730 }
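
/*
 * Illustrative sketch, not part of the original driver: lock and unlock must
 * be strictly paired around a reset attempt; the trylock-style return of
 * amdgpu_device_lock_adev() signals that a reset is already in flight.
 */
static void amdgpu_example_reset_section(struct amdgpu_device *adev)
{
        if (!amdgpu_device_lock_adev(adev, NULL))
                return; /* another reset already owns the device */
        /* ... perform the reset work here ... */
        amdgpu_device_unlock_adev(adev);
}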
4731
4732 /*
4733  * Lock a list of amdgpu devices in a hive safely. If the device is not
4734  * in a hive with multiple nodes, this behaves like amdgpu_device_lock_adev.
4735  *
4736  * Unlocking does not require a rollback.
4737  */
4738 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4739 {
4740         struct amdgpu_device *tmp_adev = NULL;
4741
4742         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4743                 if (!hive) {
4744                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4745                         return -ENODEV;
4746                 }
4747                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4748                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4749                                 goto roll_back;
4750                 }
4751         } else if (!amdgpu_device_lock_adev(adev, hive))
4752                 return -EAGAIN;
4753
4754         return 0;
4755 roll_back:
4756         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4757                 /*
4758                  * If the locking iteration broke off in the middle of a hive,
4759                  * there may be a race, or a hive device may have locked up
4760                  * independently. We may or may not be in trouble, so try to
4761                  * roll back the locks and give out a warning.
4763                  */
4764                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4765                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4766                         amdgpu_device_unlock_adev(tmp_adev);
4767                 }
4768         }
4769         return -EAGAIN;
4770 }
4771
4772 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4773 {
4774         struct pci_dev *p = NULL;
4775
4776         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4777                         adev->pdev->bus->number, 1);
4778         if (p) {
4779                 pm_runtime_enable(&(p->dev));
4780                 pm_runtime_resume(&(p->dev));
4781         }
4782 }
4783
4784 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4785 {
4786         enum amd_reset_method reset_method;
4787         struct pci_dev *p = NULL;
4788         u64 expires;
4789
4790         /*
4791          * For now, only BACO and mode1 reset are confirmed to suffer
4792          * the audio issue if the audio device is not properly suspended.
4793          */
4794         reset_method = amdgpu_asic_reset_method(adev);
4795         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4796              (reset_method != AMD_RESET_METHOD_MODE1))
4797                 return -EINVAL;
4798
4799         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4800                         adev->pdev->bus->number, 1);
4801         if (!p)
4802                 return -ENODEV;
4803
4804         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4805         if (!expires)
4806                 /*
4807                  * If we cannot get the audio device's autosuspend delay,
4808                  * a fixed 4s interval is used. Since 3s is the audio
4809                  * controller's default autosuspend delay, the 4s used
4810                  * here is guaranteed to cover it.
4811                  */
4812                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4813
4814         while (!pm_runtime_status_suspended(&(p->dev))) {
4815                 if (!pm_runtime_suspend(&(p->dev)))
4816                         break;
4817
4818                 if (expires < ktime_get_mono_fast_ns()) {
4819                         dev_warn(adev->dev, "failed to suspend display audio\n");
4820                         /* TODO: abort the succeeding gpu reset? */
4821                         return -ETIMEDOUT;
4822                 }
4823         }
4824
4825         pm_runtime_disable(&(p->dev));
4826
4827         return 0;
4828 }
4829
4830 static void amdgpu_device_recheck_guilty_jobs(
4831         struct amdgpu_device *adev, struct list_head *device_list_handle,
4832         struct amdgpu_reset_context *reset_context)
4833 {
4834         int i, r = 0;
4835
4836         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4837                 struct amdgpu_ring *ring = adev->rings[i];
4838                 int ret = 0;
4839                 struct drm_sched_job *s_job;
4840
4841                 if (!ring || !ring->sched.thread)
4842                         continue;
4843
4844                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4845                                 struct drm_sched_job, list);
4846                 if (s_job == NULL)
4847                         continue;
4848
4849                 /* Clear the job's guilty status and rely on the following step to identify the real one */
4850                 drm_sched_reset_karma(s_job);
4851                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4852
4853                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4854                 if (ret == 0) { /* timeout */
4855                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4856                                                 ring->sched.name, s_job->id);
4857
4858                         /* set guilty */
4859                         drm_sched_increase_karma(s_job);
4860 retry:
4861                         /* do hw reset */
4862                         if (amdgpu_sriov_vf(adev)) {
4863                                 amdgpu_virt_fini_data_exchange(adev);
4864                                 r = amdgpu_device_reset_sriov(adev, false);
4865                                 if (r)
4866                                         adev->asic_reset_res = r;
4867                         } else {
4868                                 clear_bit(AMDGPU_SKIP_HW_RESET,
4869                                           &reset_context->flags);
4870                                 r = amdgpu_do_asic_reset(device_list_handle,
4871                                                          reset_context);
4872                                 if (r == -EAGAIN)
4873                                         goto retry;
4874                         }
4875
4876                         /*
4877                          * add reset counter so that the following
4878                          * resubmitted job could flush vmid
4879                          */
4880                         atomic_inc(&adev->gpu_reset_counter);
4881                         continue;
4882                 }
4883
4884                 /* got the hw fence, signal finished fence */
4885                 atomic_dec(ring->sched.score);
4886                 dma_fence_get(&s_job->s_fence->finished);
4887                 dma_fence_signal(&s_job->s_fence->finished);
4888                 dma_fence_put(&s_job->s_fence->finished);
4889
4890                 /* remove node from list and free the job */
4891                 spin_lock(&ring->sched.job_list_lock);
4892                 list_del_init(&s_job->list);
4893                 spin_unlock(&ring->sched.job_list_lock);
4894                 ring->sched.ops->free_job(s_job);
4895         }
4896 }
4897
4898 /**
4899  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4900  *
4901  * @adev: amdgpu_device pointer
4902  * @job: the job which triggered the hang
4903  *
4904  * Attempt to reset the GPU if it has hung (all asics).
4905  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
4906  * Returns 0 for success or an error on failure.
4907  */
4909 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4910                               struct amdgpu_job *job)
4911 {
4912         struct list_head device_list, *device_list_handle =  NULL;
4913         bool job_signaled = false;
4914         struct amdgpu_hive_info *hive = NULL;
4915         struct amdgpu_device *tmp_adev = NULL;
4916         int i, r = 0;
4917         bool need_emergency_restart = false;
4918         bool audio_suspended = false;
4919         int tmp_vram_lost_counter;
4920         struct amdgpu_reset_context reset_context;
4921
4922         memset(&reset_context, 0, sizeof(reset_context));
4923
4924         /*
4925          * Special case: RAS triggered and full reset isn't supported
4926          */
4927         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4928
4929         /*
4930          * Flush RAM to disk so that after reboot
4931          * the user can read log and see why the system rebooted.
4932          */
4933         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4934                 DRM_WARN("Emergency reboot.");
4935
4936                 ksys_sync_helper();
4937                 emergency_restart();
4938         }
4939
4940         dev_info(adev->dev, "GPU %s begin!\n",
4941                 need_emergency_restart ? "jobs stop":"reset");
4942
4943         /*
4944          * Here we trylock to avoid a chain of resets executing, triggered
4945          * either by jobs on different adevs in an XGMI hive or by jobs on
4946          * different schedulers for the same device while this TO handler
4947          * is running. We always reset all schedulers for a device and all
4948          * devices in an XGMI hive, so that should take care of them too.
4949          */
4950         hive = amdgpu_get_xgmi_hive(adev);
4951         if (hive) {
4952                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4953                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4954                                 job ? job->base.id : -1, hive->hive_id);
4955                         amdgpu_put_xgmi_hive(hive);
4956                         if (job && job->vm)
4957                                 drm_sched_increase_karma(&job->base);
4958                         return 0;
4959                 }
4960                 mutex_lock(&hive->hive_lock);
4961         }
4962
4963         reset_context.method = AMD_RESET_METHOD_NONE;
4964         reset_context.reset_req_dev = adev;
4965         reset_context.job = job;
4966         reset_context.hive = hive;
4967         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4968
4969         /*
4970          * Lock the device before we try to operate on the linked list.
4971          * If we didn't get the device lock, don't touch the linked list
4972          * since others may be iterating over it.
4973          */
4974         r = amdgpu_device_lock_hive_adev(adev, hive);
4975         if (r) {
4976                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4977                                         job ? job->base.id : -1);
4978
4979                 /* Even though we skipped this reset, we still need to mark the job as guilty */
4980                 if (job && job->vm)
4981                         drm_sched_increase_karma(&job->base);
4982                 goto skip_recovery;
4983         }
4984
4985         /*
4986          * Build list of devices to reset.
4987          * In case we are in XGMI hive mode, resort the device list
4988          * to put adev in the 1st position.
4989          */
4990         INIT_LIST_HEAD(&device_list);
4991         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4992                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4993                         list_add_tail(&tmp_adev->reset_list, &device_list);
4994                 if (!list_is_first(&adev->reset_list, &device_list))
4995                         list_rotate_to_front(&adev->reset_list, &device_list);
4996                 device_list_handle = &device_list;
4997         } else {
4998                 list_add_tail(&adev->reset_list, &device_list);
4999                 device_list_handle = &device_list;
5000         }
5001
5002         /* block all schedulers and reset given job's ring */
5003         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5004                 /*
5005                  * Try to put the audio codec into the suspend state
5006                  * before the gpu reset starts.
5007                  *
5008                  * The power domain of the graphics device is shared
5009                  * with the AZ power domain. Without this, we may
5010                  * change the audio hardware from behind the audio
5011                  * driver's back, which will trigger audio codec
5012                  * errors.
5013                  */
5014                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5015                         audio_suspended = true;
5016
5017                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5018
5019                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5020
5021                 if (!amdgpu_sriov_vf(tmp_adev))
5022                         amdgpu_amdkfd_pre_reset(tmp_adev);
5023
5024                 /*
5025                  * Mark these ASICs to be reset as untracked first,
5026                  * and add them back after the reset completes.
5027                  */
5028                 amdgpu_unregister_gpu_instance(tmp_adev);
5029
5030                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
5031
5032                 /* disable ras on ALL IPs */
5033                 if (!need_emergency_restart &&
5034                       amdgpu_device_ip_need_full_reset(tmp_adev))
5035                         amdgpu_ras_suspend(tmp_adev);
5036
5037                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5038                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5039
5040                         if (!ring || !ring->sched.thread)
5041                                 continue;
5042
5043                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5044
5045                         if (need_emergency_restart)
5046                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5047                 }
5048                 atomic_inc(&tmp_adev->gpu_reset_counter);
5049         }
5050
5051         if (need_emergency_restart)
5052                 goto skip_sched_resume;
5053
5054         /*
5055          * Must check guilty signal here since after this point all old
5056          * HW fences are force signaled.
5057          *
5058          * job->base holds a reference to parent fence
5059          */
5060         if (job && job->base.s_fence->parent &&
5061             dma_fence_is_signaled(job->base.s_fence->parent)) {
5062                 job_signaled = true;
5063                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5064                 goto skip_hw_reset;
5065         }
5066
5067 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5068         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5069                 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5070                 /*TODO Should we stop ?*/
5071                 if (r) {
5072                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
5073                                   r, adev_to_drm(tmp_adev)->unique);
5074                         tmp_adev->asic_reset_res = r;
5075                 }
5076         }
5077
5078         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5079         /* Actual ASIC resets if needed. */
5080         /* TODO Implement XGMI hive reset logic for SRIOV */
5081         if (amdgpu_sriov_vf(adev)) {
5082                 r = amdgpu_device_reset_sriov(adev, !job);
5083                 if (r)
5084                         adev->asic_reset_res = r;
5085         } else {
5086                 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5087                 if (r == -EAGAIN)
5088                         goto retry;
5089         }
5090
5091 skip_hw_reset:
5092
5093         /* Post ASIC reset for all devs */
5094         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5095
5096                 /*
5097                  * Sometimes a later bad compute job can block a good gfx job, as the
5098                  * gfx and compute rings share internal GC HW. We add an additional
5099                  * guilty-job recheck step to find the real guilty job: it synchronously
5100                  * resubmits each job and waits for it to signal. If the wait times out,
5101                  * we identify that job as the real guilty one.
5102                  */
5103                 if (amdgpu_gpu_recovery == 2 &&
5104                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5105                         amdgpu_device_recheck_guilty_jobs(
5106                                 tmp_adev, device_list_handle, &reset_context);
5107
5108                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5109                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5110
5111                         if (!ring || !ring->sched.thread)
5112                                 continue;
5113
5114                         /* No point in resubmitting jobs if we didn't do a HW reset */
5115                         if (!tmp_adev->asic_reset_res && !job_signaled)
5116                                 drm_sched_resubmit_jobs(&ring->sched);
5117
5118                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5119                 }
5120
5121                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5122                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5123                 }
5124
5125                 tmp_adev->asic_reset_res = 0;
5126
5127                 if (r) {
5128                         /* bad news, how to tell it to userspace ? */
5129                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5130                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5131                 } else {
5132                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5133                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5134                                 DRM_WARN("smart shift update failed\n");
5135                 }
5136         }
5137
5138 skip_sched_resume:
5139         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5140                 /* unlock kfd: SRIOV would do it separately */
5141                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5142                         amdgpu_amdkfd_post_reset(tmp_adev);
5143
5144                 /* kfd_post_reset will do nothing if the kfd device was not initialized;
5145                  * bring up kfd here if it was not initialized before.
5146                  */
5147                 if (!tmp_adev->kfd.init_complete)
5148                         amdgpu_amdkfd_device_init(tmp_adev);
5149
5150                 if (audio_suspended)
5151                         amdgpu_device_resume_display_audio(tmp_adev);
5152                 amdgpu_device_unlock_adev(tmp_adev);
5153         }
5154
5155 skip_recovery:
5156         if (hive) {
5157                 atomic_set(&hive->in_reset, 0);
5158                 mutex_unlock(&hive->hive_lock);
5159                 amdgpu_put_xgmi_hive(hive);
5160         }
5161
5162         if (r && r != -EAGAIN)
5163                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5164         return r;
5165 }
5166
5167 /**
5168  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5169  *
5170  * @adev: amdgpu_device pointer
5171  *
5172  * Fetches and stores in the driver the PCIE capabilities (gen speed
5173  * and lanes) of the slot the device is in. Handles APUs and
5174  * virtualized environments where PCIE config space may not be available.
5175  */
5176 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5177 {
5178         struct pci_dev *pdev;
5179         enum pci_bus_speed speed_cap, platform_speed_cap;
5180         enum pcie_link_width platform_link_width;
5181
5182         if (amdgpu_pcie_gen_cap)
5183                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5184
5185         if (amdgpu_pcie_lane_cap)
5186                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5187
5188         /* covers APUs as well */
5189         if (pci_is_root_bus(adev->pdev->bus)) {
5190                 if (adev->pm.pcie_gen_mask == 0)
5191                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5192                 if (adev->pm.pcie_mlw_mask == 0)
5193                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5194                 return;
5195         }
5196
5197         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5198                 return;
5199
5200         pcie_bandwidth_available(adev->pdev, NULL,
5201                                  &platform_speed_cap, &platform_link_width);
5202
5203         if (adev->pm.pcie_gen_mask == 0) {
5204                 /* asic caps */
5205                 pdev = adev->pdev;
5206                 speed_cap = pcie_get_speed_cap(pdev);
5207                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5208                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5209                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5210                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5211                 } else {
5212                         if (speed_cap == PCIE_SPEED_32_0GT)
5213                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5214                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5215                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5216                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5217                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5218                         else if (speed_cap == PCIE_SPEED_16_0GT)
5219                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5220                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5221                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5222                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5223                         else if (speed_cap == PCIE_SPEED_8_0GT)
5224                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5225                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5226                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5227                         else if (speed_cap == PCIE_SPEED_5_0GT)
5228                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5229                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5230                         else
5231                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5232                 }
5233                 /* platform caps */
5234                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5235                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5236                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5237                 } else {
5238                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5239                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5240                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5241                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5242                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5243                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5244                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5245                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5246                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5247                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5248                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5249                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5250                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5251                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5252                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5253                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5254                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5255                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5256                         else
5257                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5258
5259                 }
5260         }
5261         if (adev->pm.pcie_mlw_mask == 0) {
5262                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5263                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5264                 } else {
5265                         switch (platform_link_width) {
5266                         case PCIE_LNK_X32:
5267                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5268                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5269                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5270                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5271                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5272                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5273                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5274                                 break;
5275                         case PCIE_LNK_X16:
5276                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5277                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5278                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5279                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5280                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5281                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5282                                 break;
5283                         case PCIE_LNK_X12:
5284                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5285                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5286                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5287                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5288                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5289                                 break;
5290                         case PCIE_LNK_X8:
5291                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5292                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5293                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5294                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5295                                 break;
5296                         case PCIE_LNK_X4:
5297                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5298                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5299                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5300                                 break;
5301                         case PCIE_LNK_X2:
5302                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5303                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5304                                 break;
5305                         case PCIE_LNK_X1:
5306                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5307                                 break;
5308                         default:
5309                                 break;
5310                         }
5311                 }
5312         }
5313 }
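
/*
 * Illustrative sketch, not part of the original driver: the cascading mask
 * selection above amounts to "support every gen up to the reported speed".
 * amdgpu_example_max_gen() is a hypothetical helper expressing that mapping.
 */
static int amdgpu_example_max_gen(enum pci_bus_speed speed)
{
        switch (speed) {
        case PCIE_SPEED_32_0GT: return 5;
        case PCIE_SPEED_16_0GT: return 4;
        case PCIE_SPEED_8_0GT:  return 3;
        case PCIE_SPEED_5_0GT:  return 2;
        default:                return 1; /* unknown or 2.5GT/s */
        }
}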
5314
5315 int amdgpu_device_baco_enter(struct drm_device *dev)
5316 {
5317         struct amdgpu_device *adev = drm_to_adev(dev);
5318         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5319
5320         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5321                 return -ENOTSUPP;
5322
5323         if (ras && adev->ras_enabled &&
5324             adev->nbio.funcs->enable_doorbell_interrupt)
5325                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5326
5327         return amdgpu_dpm_baco_enter(adev);
5328 }
5329
5330 int amdgpu_device_baco_exit(struct drm_device *dev)
5331 {
5332         struct amdgpu_device *adev = drm_to_adev(dev);
5333         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5334         int ret = 0;
5335
5336         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5337                 return -ENOTSUPP;
5338
5339         ret = amdgpu_dpm_baco_exit(adev);
5340         if (ret)
5341                 return ret;
5342
5343         if (ras && adev->ras_enabled &&
5344             adev->nbio.funcs->enable_doorbell_interrupt)
5345                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5346
5347         if (amdgpu_passthrough(adev) &&
5348             adev->nbio.funcs->clear_doorbell_interrupt)
5349                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5350
5351         return 0;
5352 }
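
/*
 * Illustrative sketch, not part of the original driver: BACO enter and exit
 * are paired; a hypothetical runtime-PM path would bracket the low-power
 * window like this.
 */
static int amdgpu_example_baco_cycle(struct drm_device *dev)
{
        int r = amdgpu_device_baco_enter(dev);

        if (r)
                return r;
        /* ... device sits in BACO until wakeup is requested ... */
        return amdgpu_device_baco_exit(dev);
}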
5353
5354 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5355 {
5356         int i;
5357
5358         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5359                 struct amdgpu_ring *ring = adev->rings[i];
5360
5361                 if (!ring || !ring->sched.thread)
5362                         continue;
5363
5364                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5365         }
5366 }
5367
5368 /**
5369  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5370  * @pdev: PCI device struct
5371  * @state: PCI channel state
5372  *
5373  * Description: Called when a PCI error is detected.
5374  *
5375  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5376  */
5377 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5378 {
5379         struct drm_device *dev = pci_get_drvdata(pdev);
5380         struct amdgpu_device *adev = drm_to_adev(dev);
5381         int i;
5382
5383         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5384
5385         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5386                 DRM_WARN("No support for XGMI hive yet...");
5387                 return PCI_ERS_RESULT_DISCONNECT;
5388         }
5389
5390         switch (state) {
5391         case pci_channel_io_normal:
5392                 return PCI_ERS_RESULT_CAN_RECOVER;
5393         /* Fatal error, prepare for slot reset */
5394         case pci_channel_io_frozen:
5395                 /*
5396                  * Cancel and wait for all TDRs in progress if we fail to
5397                  * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5398                  *
5399                  * Locking adev->reset_sem will prevent any external access
5400                  * to GPU during PCI error recovery
5401                  */
5402                 while (!amdgpu_device_lock_adev(adev, NULL))
5403                         amdgpu_cancel_all_tdr(adev);
5404
5405                 /*
5406                  * Block any work scheduling as we do for regular GPU reset
5407                  * for the duration of the recovery
5408                  */
5409                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5410                         struct amdgpu_ring *ring = adev->rings[i];
5411
5412                         if (!ring || !ring->sched.thread)
5413                                 continue;
5414
5415                         drm_sched_stop(&ring->sched, NULL);
5416                 }
5417                 atomic_inc(&adev->gpu_reset_counter);
5418                 return PCI_ERS_RESULT_NEED_RESET;
5419         case pci_channel_io_perm_failure:
5420                 /* Permanent error, prepare for device removal */
5421                 return PCI_ERS_RESULT_DISCONNECT;
5422         }
5423
5424         return PCI_ERS_RESULT_NEED_RESET;
5425 }
5426
5427 /**
5428  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5429  * @pdev: pointer to PCI device
5430  */
5431 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5432 {
5434         DRM_INFO("PCI error: mmio enabled callback!!\n");
5435
5436         /* TODO - dump whatever for debugging purposes */
5437
5438         /* This is called only if amdgpu_pci_error_detected returns
5439          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5440          * works, no need to reset slot.
5441          */
5442
5443         return PCI_ERS_RESULT_RECOVERED;
5444 }
5445
5446 /**
5447  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5448  * @pdev: PCI device struct
5449  *
5450  * Description: This routine is called by the pci error recovery
5451  * code after the PCI slot has been reset, just before we
5452  * should resume normal operations.
5453  */
5454 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5455 {
5456         struct drm_device *dev = pci_get_drvdata(pdev);
5457         struct amdgpu_device *adev = drm_to_adev(dev);
5458         int r, i;
5459         struct amdgpu_reset_context reset_context;
5460         u32 memsize;
5461         struct list_head device_list;
5462
5463         DRM_INFO("PCI error: slot reset callback!!\n");
5464
5465         memset(&reset_context, 0, sizeof(reset_context));
5466
5467         INIT_LIST_HEAD(&device_list);
5468         list_add_tail(&adev->reset_list, &device_list);
5469
5470         /* wait for asic to come out of reset */
5471         msleep(500);
5472
5473         /* Restore PCI config space */
5474         amdgpu_device_load_pci_state(pdev);
5475
5476         /* confirm the ASIC came out of reset */
5477         for (i = 0; i < adev->usec_timeout; i++) {
5478                 memsize = amdgpu_asic_get_config_memsize(adev);
5479
5480                 if (memsize != 0xffffffff)
5481                         break;
5482                 udelay(1);
5483         }
5484         if (memsize == 0xffffffff) {
5485                 r = -ETIME;
5486                 goto out;
5487         }
5488
5489         reset_context.method = AMD_RESET_METHOD_NONE;
5490         reset_context.reset_req_dev = adev;
5491         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5492         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5493
5494         adev->no_hw_access = true;
5495         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5496         adev->no_hw_access = false;
5497         if (r)
5498                 goto out;
5499
5500         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5501
5502 out:
5503         if (!r) {
5504                 if (amdgpu_device_cache_pci_state(adev->pdev))
5505                         pci_restore_state(adev->pdev);
5506
5507                 DRM_INFO("PCIe error recovery succeeded\n");
5508         } else {
5509                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5510                 amdgpu_device_unlock_adev(adev);
5511         }
5512
5513         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5514 }
5515
5516 /**
5517  * amdgpu_pci_resume() - resume normal ops after PCI reset
5518  * @pdev: pointer to PCI device
5519  *
5520  * Called when the error recovery driver tells us that it's
5521  * OK to resume normal operation.
5522  */
5523 void amdgpu_pci_resume(struct pci_dev *pdev)
5524 {
5525         struct drm_device *dev = pci_get_drvdata(pdev);
5526         struct amdgpu_device *adev = drm_to_adev(dev);
5527         int i;
5528
5530         DRM_INFO("PCI error: resume callback!!\n");
5531
5532         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5533                 struct amdgpu_ring *ring = adev->rings[i];
5534
5535                 if (!ring || !ring->sched.thread)
5536                         continue;
5537
5539                 drm_sched_resubmit_jobs(&ring->sched);
5540                 drm_sched_start(&ring->sched, true);
5541         }
5542
5543         amdgpu_device_unlock_adev(adev);
5544 }
5545
5546 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5547 {
5548         struct drm_device *dev = pci_get_drvdata(pdev);
5549         struct amdgpu_device *adev = drm_to_adev(dev);
5550         int r;
5551
5552         r = pci_save_state(pdev);
5553         if (!r) {
5554                 kfree(adev->pci_state);
5555
5556                 adev->pci_state = pci_store_saved_state(pdev);
5557
5558                 if (!adev->pci_state) {
5559                         DRM_ERROR("Failed to store PCI saved state");
5560                         return false;
5561                 }
5562         } else {
5563                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5564                 return false;
5565         }
5566
5567         return true;
5568 }
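
/*
 * Illustrative sketch, not part of the original driver: cache the PCI config
 * space before a reset and load it back afterwards, exactly as
 * amdgpu_device_mode1_reset() does above.
 */
static void amdgpu_example_reset_with_pci_state(struct amdgpu_device *adev)
{
        if (!amdgpu_device_cache_pci_state(adev->pdev))
                return; /* nothing saved, nothing to restore */
        /* ... perform the ASIC reset here ... */
        amdgpu_device_load_pci_state(adev->pdev);
}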
5569
5570 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5571 {
5572         struct drm_device *dev = pci_get_drvdata(pdev);
5573         struct amdgpu_device *adev = drm_to_adev(dev);
5574         int r;
5575
5576         if (!adev->pci_state)
5577                 return false;
5578
5579         r = pci_load_saved_state(pdev, adev->pci_state);
5580
5581         if (!r) {
5582                 pci_restore_state(pdev);
5583         } else {
5584                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5585                 return false;
5586         }
5587
5588         return true;
5589 }
5590
5591 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5592                 struct amdgpu_ring *ring)
5593 {
5594 #ifdef CONFIG_X86_64
5595         if (adev->flags & AMD_IS_APU)
5596                 return;
5597 #endif
5598         if (adev->gmc.xgmi.connected_to_cpu)
5599                 return;
5600
5601         if (ring && ring->funcs->emit_hdp_flush)
5602                 amdgpu_ring_emit_hdp_flush(ring);
5603         else
5604                 amdgpu_asic_flush_hdp(adev, ring);
5605 }
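
/*
 * Illustrative sketch, not part of the original driver: an HDP flush is
 * needed after CPU writes through the HDP aperture so the GPU observes them;
 * the checks above make it a no-op on APUs and on ASICs connected to the CPU
 * via XGMI.
 */
static void amdgpu_example_publish_cpu_writes(struct amdgpu_device *adev)
{
        /* ... CPU writes to VRAM through the BAR/HDP path ... */
        amdgpu_device_flush_hdp(adev, NULL);
}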
5606
5607 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5608                 struct amdgpu_ring *ring)
5609 {
5610 #ifdef CONFIG_X86_64
5611         if (adev->flags & AMD_IS_APU)
5612                 return;
5613 #endif
5614         if (adev->gmc.xgmi.connected_to_cpu)
5615                 return;
5616
5617         amdgpu_asic_invalidate_hdp(adev, ring);
5618 }