drm/amdgpu: Adjust removal control flow for smu v13_0_2
linux-2.6-microblaze.git: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77
78 #include <drm/drm_drv.h>
79
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87
88 #define AMDGPU_RESUME_MS                2000
89 #define AMDGPU_MAX_RETRY_LIMIT          2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91
92 const char *amdgpu_asic_name[] = {
93         "TAHITI",
94         "PITCAIRN",
95         "VERDE",
96         "OLAND",
97         "HAINAN",
98         "BONAIRE",
99         "KAVERI",
100         "KABINI",
101         "HAWAII",
102         "MULLINS",
103         "TOPAZ",
104         "TONGA",
105         "FIJI",
106         "CARRIZO",
107         "STONEY",
108         "POLARIS10",
109         "POLARIS11",
110         "POLARIS12",
111         "VEGAM",
112         "VEGA10",
113         "VEGA12",
114         "VEGA20",
115         "RAVEN",
116         "ARCTURUS",
117         "RENOIR",
118         "ALDEBARAN",
119         "NAVI10",
120         "CYAN_SKILLFISH",
121         "NAVI14",
122         "NAVI12",
123         "SIENNA_CICHLID",
124         "NAVY_FLOUNDER",
125         "VANGOGH",
126         "DIMGREY_CAVEFISH",
127         "BEIGE_GOBY",
128         "YELLOW_CARP",
129         "IP DISCOVERY",
130         "LAST",
131 };
132
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs)
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143                 struct device_attribute *attr, char *buf)
144 {
145         struct drm_device *ddev = dev_get_drvdata(dev);
146         struct amdgpu_device *adev = drm_to_adev(ddev);
147         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148
149         return sysfs_emit(buf, "%llu\n", cnt);
150 }
151
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153                 amdgpu_device_get_pcie_replay_count, NULL);
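
/*
 * Illustrative usage sketch (not part of the original file): user space reads
 * this attribute like any other sysfs file.  The card index in the path below
 * is an assumption and depends on the system.
 *
 *	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *	unsigned long long replays;
 *
 *	if (f && fscanf(f, "%llu", &replays) == 1)
 *		printf("PCIe replays: %llu\n", replays);
 */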
154
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device
162  * The file product_name is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168                 struct device_attribute *attr, char *buf)
169 {
170         struct drm_device *ddev = dev_get_drvdata(dev);
171         struct amdgpu_device *adev = drm_to_adev(ddev);
172
173         return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175
176 static DEVICE_ATTR(product_name, S_IRUGO,
177                 amdgpu_device_get_product_name, NULL);
178
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device
184  * The file product_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190                 struct device_attribute *attr, char *buf)
191 {
192         struct drm_device *ddev = dev_get_drvdata(dev);
193         struct amdgpu_device *adev = drm_to_adev(ddev);
194
195         return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197
198 static DEVICE_ATTR(product_number, S_IRUGO,
199                 amdgpu_device_get_product_number, NULL);
200
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212                 struct device_attribute *attr, char *buf)
213 {
214         struct drm_device *ddev = dev_get_drvdata(dev);
215         struct amdgpu_device *adev = drm_to_adev(ddev);
216
217         return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221                 amdgpu_device_get_serial_number, NULL);
222
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise return false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233         struct amdgpu_device *adev = drm_to_adev(dev);
234
235         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236                 return true;
237         return false;
238 }
239
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise return false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250         struct amdgpu_device *adev = drm_to_adev(dev);
251
252         if (adev->has_pr3 ||
253             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254                 return true;
255         return false;
256 }
257
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supports BACO,
264  * otherwise return false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268         struct amdgpu_device *adev = drm_to_adev(dev);
269
270         return amdgpu_asic_supports_baco(adev);
271 }
272
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284         return (amdgpu_device_supports_boco(dev) &&
285                 amdgpu_acpi_is_power_shift_control_supported());
286 }
287
288 /*
289  * VRAM access helper functions
290  */
291
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size; the buffer at @buf must be at least @size bytes
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302                              void *buf, size_t size, bool write)
303 {
304         unsigned long flags;
305         uint32_t hi = ~0, tmp = 0;
306         uint32_t *data = buf;
307         uint64_t last;
308         int idx;
309
310         if (!drm_dev_enter(adev_to_drm(adev), &idx))
311                 return;
312
313         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314
315         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316         for (last = pos + size; pos < last; pos += 4) {
317                 tmp = pos >> 31;
318
319                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320                 if (tmp != hi) {
321                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322                         hi = tmp;
323                 }
324                 if (write)
325                         WREG32_NO_KIQ(mmMM_DATA, *data++);
326                 else
327                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
328         }
329
330         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331         drm_dev_exit(idx);
332 }
333
334 /**
335  * amdgpu_device_aper_access - access vram by the vram aperture
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size; the buffer at @buf must be at least @size bytes
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * Returns the number of bytes that have been transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346                                  void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349         void __iomem *addr;
350         size_t count = 0;
351         uint64_t last;
352
353         if (!adev->mman.aper_base_kaddr)
354                 return 0;
355
356         last = min(pos + size, adev->gmc.visible_vram_size);
357         if (last > pos) {
358                 addr = adev->mman.aper_base_kaddr + pos;
359                 count = last - pos;
360
361                 if (write) {
362                         memcpy_toio(addr, buf, count);
363                         mb();
364                         amdgpu_device_flush_hdp(adev, NULL);
365                 } else {
366                         amdgpu_device_invalidate_hdp(adev, NULL);
367                         mb();
368                         memcpy_fromio(buf, addr, count);
369                 }
370
371         }
372
373         return count;
374 #else
375         return 0;
376 #endif
377 }
378
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size; the buffer at @buf must be at least @size bytes
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389                                void *buf, size_t size, bool write)
390 {
391         size_t count;
392
393         /* try using the vram aperture to access vram first */
394         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395         size -= count;
396         if (size) {
397                 /* use MM_INDEX/MM_DATA to access the rest of vram */
398                 pos += count;
399                 buf += count;
400                 amdgpu_device_mm_access(adev, pos, buf, size, write);
401         }
402 }
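
/*
 * Illustrative usage sketch (not part of the original file): read a few dwords
 * from the start of VRAM into a local buffer.  The helper transparently uses
 * the CPU-visible aperture when possible and falls back to MM_INDEX/MM_DATA.
 *
 *	uint32_t header[4];
 *
 *	amdgpu_device_vram_access(adev, 0, header, sizeof(header), false);
 */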
403
404 /*
405  * register access helper functions.
406  */
407
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411         if (adev->no_hw_access)
412                 return true;
413
414 #ifdef CONFIG_LOCKDEP
415         /*
416          * This is a bit complicated to understand, so worth a comment. What we assert
417          * here is that the GPU reset is not running on another thread in parallel.
418          *
419          * For this we trylock the read side of the reset semaphore, if that succeeds
420          * we know that the reset is not running in parallel.
421          *
422          * If the trylock fails we assert that we are either already holding the read
423          * side of the lock or are the reset thread itself and hold the write side of
424          * the lock.
425          */
426         if (in_task()) {
427                 if (down_read_trylock(&adev->reset_domain->sem))
428                         up_read(&adev->reset_domain->sem);
429                 else
430                         lockdep_assert_held(&adev->reset_domain->sem);
431         }
432 #endif
433         return false;
434 }
435
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446                             uint32_t reg, uint32_t acc_flags)
447 {
448         uint32_t ret;
449
450         if (amdgpu_device_skip_hw_access(adev))
451                 return 0;
452
453         if ((reg * 4) < adev->rmmio_size) {
454                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455                     amdgpu_sriov_runtime(adev) &&
456                     down_read_trylock(&adev->reset_domain->sem)) {
457                         ret = amdgpu_kiq_rreg(adev, reg);
458                         up_read(&adev->reset_domain->sem);
459                 } else {
460                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461                 }
462         } else {
463                 ret = adev->pcie_rreg(adev, reg * 4);
464         }
465
466         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467
468         return ret;
469 }
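
/*
 * Illustrative usage sketch (not part of the original file): a direct dword
 * register read that bypasses the KIQ path.  "reg_offset" is a placeholder;
 * AMDGPU_REGS_NO_KIQ is the access flag checked above.
 *
 *	uint32_t val = amdgpu_device_rreg(adev, reg_offset, AMDGPU_REGS_NO_KIQ);
 */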
470
471 /*
472  * MMIO register read with byte offset helper function
473  * @offset: byte offset from MMIO start
474  *
475  */
476
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487         if (amdgpu_device_skip_hw_access(adev))
488                 return 0;
489
490         if (offset < adev->rmmio_size)
491                 return (readb(adev->rmmio + offset));
492         BUG();
493 }
494
495 /*
496  * MMIO register write with byte offset helper function
497  * @offset: byte offset from MMIO start
498  * @value: the value to be written to the register
499  *
500  */
501 /**
502  * amdgpu_mm_wreg8 - write a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512         if (amdgpu_device_skip_hw_access(adev))
513                 return;
514
515         if (offset < adev->rmmio_size)
516                 writeb(value, adev->rmmio + offset);
517         else
518                 BUG();
519 }
520
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532                         uint32_t reg, uint32_t v,
533                         uint32_t acc_flags)
534 {
535         if (amdgpu_device_skip_hw_access(adev))
536                 return;
537
538         if ((reg * 4) < adev->rmmio_size) {
539                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540                     amdgpu_sriov_runtime(adev) &&
541                     down_read_trylock(&adev->reset_domain->sem)) {
542                         amdgpu_kiq_wreg(adev, reg, v);
543                         up_read(&adev->reset_domain->sem);
544                 } else {
545                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546                 }
547         } else {
548                 adev->pcie_wreg(adev, reg * 4, v);
549         }
550
551         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553
554 /**
555  * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with the RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * This function is invoked only for debugfs register access.
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564                              uint32_t reg, uint32_t v)
565 {
566         if (amdgpu_device_skip_hw_access(adev))
567                 return;
568
569         if (amdgpu_sriov_fullaccess(adev) &&
570             adev->gfx.rlc.funcs &&
571             adev->gfx.rlc.funcs->is_rlcg_access_range) {
572                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574         } else if ((reg * 4) >= adev->rmmio_size) {
575                 adev->pcie_wreg(adev, reg * 4, v);
576         } else {
577                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578         }
579 }
580
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592         if (amdgpu_device_skip_hw_access(adev))
593                 return 0;
594
595         if (index < adev->doorbell.num_doorbells) {
596                 return readl(adev->doorbell.ptr + index);
597         } else {
598                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599                 return 0;
600         }
601 }
602
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615         if (amdgpu_device_skip_hw_access(adev))
616                 return;
617
618         if (index < adev->doorbell.num_doorbells) {
619                 writel(v, adev->doorbell.ptr + index);
620         } else {
621                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622         }
623 }
624
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636         if (amdgpu_device_skip_hw_access(adev))
637                 return 0;
638
639         if (index < adev->doorbell.num_doorbells) {
640                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641         } else {
642                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643                 return 0;
644         }
645 }
646
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659         if (amdgpu_device_skip_hw_access(adev))
660                 return;
661
662         if (index < adev->doorbell.num_doorbells) {
663                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664         } else {
665                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666         }
667 }
668
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680                                 u32 pcie_index, u32 pcie_data,
681                                 u32 reg_addr)
682 {
683         unsigned long flags;
684         u32 r;
685         void __iomem *pcie_index_offset;
686         void __iomem *pcie_data_offset;
687
688         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691
692         writel(reg_addr, pcie_index_offset);
693         readl(pcie_index_offset);
694         r = readl(pcie_data_offset);
695         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696
697         return r;
698 }
699
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711                                   u32 pcie_index, u32 pcie_data,
712                                   u32 reg_addr)
713 {
714         unsigned long flags;
715         u64 r;
716         void __iomem *pcie_index_offset;
717         void __iomem *pcie_data_offset;
718
719         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722
723         /* read low 32 bits */
724         writel(reg_addr, pcie_index_offset);
725         readl(pcie_index_offset);
726         r = readl(pcie_data_offset);
727         /* read high 32 bits */
728         writel(reg_addr + 4, pcie_index_offset);
729         readl(pcie_index_offset);
730         r |= ((u64)readl(pcie_data_offset) << 32);
731         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732
733         return r;
734 }
735
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747                                  u32 pcie_index, u32 pcie_data,
748                                  u32 reg_addr, u32 reg_data)
749 {
750         unsigned long flags;
751         void __iomem *pcie_index_offset;
752         void __iomem *pcie_data_offset;
753
754         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757
758         writel(reg_addr, pcie_index_offset);
759         readl(pcie_index_offset);
760         writel(reg_data, pcie_data_offset);
761         readl(pcie_data_offset);
762         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776                                    u32 pcie_index, u32 pcie_data,
777                                    u32 reg_addr, u64 reg_data)
778 {
779         unsigned long flags;
780         void __iomem *pcie_index_offset;
781         void __iomem *pcie_data_offset;
782
783         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786
787         /* write low 32 bits */
788         writel(reg_addr, pcie_index_offset);
789         readl(pcie_index_offset);
790         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791         readl(pcie_data_offset);
792         /* write high 32 bits */
793         writel(reg_addr + 4, pcie_index_offset);
794         readl(pcie_index_offset);
795         writel((u32)(reg_data >> 32), pcie_data_offset);
796         readl(pcie_data_offset);
797         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813         BUG();
814         return 0;
815 }
816
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register write function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830                   reg, v);
831         BUG();
832 }
833
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847         BUG();
848         return 0;
849 }
850
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register write function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864                   reg, v);
865         BUG();
866 }
867
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880                                           uint32_t block, uint32_t reg)
881 {
882         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883                   reg, block);
884         BUG();
885         return 0;
886 }
887
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register write function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900                                       uint32_t block,
901                                       uint32_t reg, uint32_t v)
902 {
903         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904                   reg, block, v);
905         BUG();
906 }
907
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917         amdgpu_asic_pre_asic_init(adev);
918
919         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920                 return amdgpu_atomfirmware_asic_init(adev, true);
921         else
922                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937                                        &adev->vram_scratch.robj,
938                                        &adev->vram_scratch.gpu_addr,
939                                        (void **)&adev->vram_scratch.ptr);
940 }
941
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array of registers with AND and OR masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965                                              const u32 *registers,
966                                              const u32 array_size)
967 {
968         u32 tmp, reg, and_mask, or_mask;
969         int i;
970
971         if (array_size % 3)
972                 return;
973
974         for (i = 0; i < array_size; i += 3) {
975                 reg = registers[i + 0];
976                 and_mask = registers[i + 1];
977                 or_mask = registers[i + 2];
978
979                 if (and_mask == 0xffffffff) {
980                         tmp = or_mask;
981                 } else {
982                         tmp = RREG32(reg);
983                         tmp &= ~and_mask;
984                         if (adev->family >= AMDGPU_FAMILY_AI)
985                                 tmp |= (or_mask & and_mask);
986                         else
987                                 tmp |= or_mask;
988                 }
989                 WREG32(reg, tmp);
990         }
991 }
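
/*
 * Illustrative sketch (not part of the original file): golden register arrays
 * are flat {offset, and_mask, or_mask} triplets.  The offsets and masks below
 * are placeholders, not real golden settings.
 *
 *	static const u32 example_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,	// and_mask of all ones: write or_mask
 *		0x5678, 0x0000000f, 0x00000002,	// read-modify-write of the masked bits
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */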
992
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015         return pci_reset_function(adev->pdev);
1016 }
1017
1018 /*
1019  * GPU doorbell aperture helpers function.
1020  */
1021 /**
1022  * amdgpu_device_doorbell_init - Init doorbell driver information.
1023  *
1024  * @adev: amdgpu_device pointer
1025  *
1026  * Init doorbell driver information (CIK)
1027  * Returns 0 on success, error on failure.
1028  */
1029 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1030 {
1031
1032         /* No doorbell on SI hardware generation */
1033         if (adev->asic_type < CHIP_BONAIRE) {
1034                 adev->doorbell.base = 0;
1035                 adev->doorbell.size = 0;
1036                 adev->doorbell.num_doorbells = 0;
1037                 adev->doorbell.ptr = NULL;
1038                 return 0;
1039         }
1040
1041         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1042                 return -EINVAL;
1043
1044         amdgpu_asic_init_doorbell_index(adev);
1045
1046         /* doorbell bar mapping */
1047         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1048         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049
1050         if (adev->enable_mes) {
1051                 adev->doorbell.num_doorbells =
1052                         adev->doorbell.size / sizeof(u32);
1053         } else {
1054                 adev->doorbell.num_doorbells =
1055                         min_t(u32, adev->doorbell.size / sizeof(u32),
1056                               adev->doorbell_index.max_assignment+1);
1057                 if (adev->doorbell.num_doorbells == 0)
1058                         return -EINVAL;
1059
1060                 /* For Vega, reserve and map two pages on the doorbell BAR since the
1061                  * SDMA paging queue doorbell uses the second page. The
1062                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1063                  * doorbells are in the first page. So with the paging queue enabled,
1064                  * the max num_doorbells should be extended by one page (0x400 in dwords).
1065                  */
1066                 if (adev->asic_type >= CHIP_VEGA10)
1067                         adev->doorbell.num_doorbells += 0x400;
1068         }
1069
1070         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1071                                      adev->doorbell.num_doorbells *
1072                                      sizeof(u32));
1073         if (adev->doorbell.ptr == NULL)
1074                 return -ENOMEM;
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1081  *
1082  * @adev: amdgpu_device pointer
1083  *
1084  * Tear down doorbell driver information (CIK)
1085  */
1086 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1087 {
1088         iounmap(adev->doorbell.ptr);
1089         adev->doorbell.ptr = NULL;
1090 }
1091
1092
1093
1094 /*
1095  * amdgpu_device_wb_*()
1096  * Writeback is the method by which the GPU updates special pages in memory
1097  * with the status of certain GPU events (fences, ring pointers, etc.).
1098  */
1099
1100 /**
1101  * amdgpu_device_wb_fini - Disable Writeback and free memory
1102  *
1103  * @adev: amdgpu_device pointer
1104  *
1105  * Disables Writeback and frees the Writeback memory (all asics).
1106  * Used at driver shutdown.
1107  */
1108 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1109 {
1110         if (adev->wb.wb_obj) {
1111                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1112                                       &adev->wb.gpu_addr,
1113                                       (void **)&adev->wb.wb);
1114                 adev->wb.wb_obj = NULL;
1115         }
1116 }
1117
1118 /**
1119  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1120  *
1121  * @adev: amdgpu_device pointer
1122  *
1123  * Initializes writeback and allocates writeback memory (all asics).
1124  * Used at driver startup.
1125  * Returns 0 on success or a negative error code on failure.
1126  */
1127 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1128 {
1129         int r;
1130
1131         if (adev->wb.wb_obj == NULL) {
1132                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1133                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1134                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1135                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1136                                             (void **)&adev->wb.wb);
1137                 if (r) {
1138                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1139                         return r;
1140                 }
1141
1142                 adev->wb.num_wb = AMDGPU_MAX_WB;
1143                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1144
1145                 /* clear wb memory */
1146                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1147         }
1148
1149         return 0;
1150 }
1151
1152 /**
1153  * amdgpu_device_wb_get - Allocate a wb entry
1154  *
1155  * @adev: amdgpu_device pointer
1156  * @wb: wb index
1157  *
1158  * Allocate a wb slot for use by the driver (all asics).
1159  * Returns 0 on success or -EINVAL on failure.
1160  */
1161 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1162 {
1163         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1164
1165         if (offset < adev->wb.num_wb) {
1166                 __set_bit(offset, adev->wb.used);
1167                 *wb = offset << 3; /* convert to dw offset */
1168                 return 0;
1169         } else {
1170                 return -EINVAL;
1171         }
1172 }
1173
1174 /**
1175  * amdgpu_device_wb_free - Free a wb entry
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @wb: wb index
1179  *
1180  * Free a wb slot allocated for use by the driver (all asics)
1181  */
1182 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1183 {
1184         wb >>= 3;
1185         if (wb < adev->wb.num_wb)
1186                 __clear_bit(wb, adev->wb.used);
1187 }
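
/*
 * Illustrative usage sketch (not part of the original file): a typical caller
 * grabs a writeback slot, lets the GPU write status to
 * adev->wb.gpu_addr + offset * 4, reads it back through adev->wb.wb[], and
 * frees the slot on teardown.
 *
 *	u32 wb_offs;
 *
 *	if (amdgpu_device_wb_get(adev, &wb_offs) == 0) {
 *		u32 status = adev->wb.wb[wb_offs];	// CPU view of the slot
 *		amdgpu_device_wb_free(adev, wb_offs);
 *	}
 */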
1188
1189 /**
1190  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1191  *
1192  * @adev: amdgpu_device pointer
1193  *
1194  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1195  * to fail, but if any of the BARs is not accessible after the resize we abort
1196  * driver loading by returning -ENODEV.
1197  */
1198 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1199 {
1200         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1201         struct pci_bus *root;
1202         struct resource *res;
1203         unsigned i;
1204         u16 cmd;
1205         int r;
1206
1207         /* Bypass for VF */
1208         if (amdgpu_sriov_vf(adev))
1209                 return 0;
1210
1211         /* skip if the bios has already enabled large BAR */
1212         if (adev->gmc.real_vram_size &&
1213             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1214                 return 0;
1215
1216         /* Check if the root BUS has 64bit memory resources */
1217         root = adev->pdev->bus;
1218         while (root->parent)
1219                 root = root->parent;
1220
1221         pci_bus_for_each_resource(root, res, i) {
1222                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1223                     res->start > 0x100000000ull)
1224                         break;
1225         }
1226
1227         /* Trying to resize is pointless without a root hub window above 4GB */
1228         if (!res)
1229                 return 0;
1230
1231         /* Limit the BAR size to what is available */
1232         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1233                         rbar_size);
1234
1235         /* Disable memory decoding while we change the BAR addresses and size */
1236         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1237         pci_write_config_word(adev->pdev, PCI_COMMAND,
1238                               cmd & ~PCI_COMMAND_MEMORY);
1239
1240         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1241         amdgpu_device_doorbell_fini(adev);
1242         if (adev->asic_type >= CHIP_BONAIRE)
1243                 pci_release_resource(adev->pdev, 2);
1244
1245         pci_release_resource(adev->pdev, 0);
1246
1247         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1248         if (r == -ENOSPC)
1249                 DRM_INFO("Not enough PCI address space for a large BAR.");
1250         else if (r && r != -ENOTSUPP)
1251                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1252
1253         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1254
1255         /* When the doorbell or fb BAR isn't available we have no chance of
1256          * using the device.
1257          */
1258         r = amdgpu_device_doorbell_init(adev);
1259         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1260                 return -ENODEV;
1261
1262         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1263
1264         return 0;
1265 }
1266
1267 /*
1268  * GPU helpers function.
1269  */
1270 /**
1271  * amdgpu_device_need_post - check if the hw need post or not
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Check if the asic has been initialized (all asics) at driver startup
1276  * or if a post is needed because a hw reset was performed.
1277  * Returns true if post is needed or false if not.
1278  */
1279 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1280 {
1281         uint32_t reg;
1282
1283         if (amdgpu_sriov_vf(adev))
1284                 return false;
1285
1286         if (amdgpu_passthrough(adev)) {
1287                 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1288                  * reboot some old SMC firmware still needs the driver to do a vPost or the
1289                  * GPU hangs, while SMC firmware versions above 22.15 don't have this flaw,
1290                  * so we force vPost for SMC versions below 22.15
1291                  */
1292                 if (adev->asic_type == CHIP_FIJI) {
1293                         int err;
1294                         uint32_t fw_ver;
1295                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1296                         /* force vPost if an error occurred */
1297                         if (err)
1298                                 return true;
1299
1300                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1301                         if (fw_ver < 0x00160e00)
1302                                 return true;
1303                 }
1304         }
1305
1306         /* Don't post if we need to reset whole hive on init */
1307         if (adev->gmc.xgmi.pending_reset)
1308                 return false;
1309
1310         if (adev->has_hw_reset) {
1311                 adev->has_hw_reset = false;
1312                 return true;
1313         }
1314
1315         /* bios scratch used on CIK+ */
1316         if (adev->asic_type >= CHIP_BONAIRE)
1317                 return amdgpu_atombios_scratch_need_asic_init(adev);
1318
1319         /* check MEM_SIZE for older asics */
1320         reg = amdgpu_asic_get_config_memsize(adev);
1321
1322         if ((reg != 0) && (reg != 0xffffffff))
1323                 return false;
1324
1325         return true;
1326 }
1327
1328 /**
1329  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1334  * be set for this device.
1335  *
1336  * Returns true if it should be used or false if not.
1337  */
1338 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1339 {
1340         switch (amdgpu_aspm) {
1341         case -1:
1342                 break;
1343         case 0:
1344                 return false;
1345         case 1:
1346                 return true;
1347         default:
1348                 return false;
1349         }
1350         return pcie_aspm_enabled(adev->pdev);
1351 }
1352
1353 /* if we get transitioned to only one device, take VGA back */
1354 /**
1355  * amdgpu_device_vga_set_decode - enable/disable vga decode
1356  *
1357  * @pdev: PCI device pointer
1358  * @state: enable/disable vga decode
1359  *
1360  * Enable/disable vga decode (all asics).
1361  * Returns VGA resource flags.
1362  */
1363 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1364                 bool state)
1365 {
1366         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1367         amdgpu_asic_set_vga_state(adev, state);
1368         if (state)
1369                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1370                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1371         else
1372                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1373 }
1374
1375 /**
1376  * amdgpu_device_check_block_size - validate the vm block size
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Validates the vm block size specified via module parameter.
1381  * The vm block size defines the number of bits in the page table versus the
1382  * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
1383  * 9 bits in the page table, and the remaining bits in the page directory.
1384  */
1385 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1386 {
1387         /* defines number of bits in page table versus page directory,
1388          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1389          * page table and the remaining bits are in the page directory */
1390         if (amdgpu_vm_block_size == -1)
1391                 return;
1392
1393         if (amdgpu_vm_block_size < 9) {
1394                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1395                          amdgpu_vm_block_size);
1396                 amdgpu_vm_block_size = -1;
1397         }
1398 }
1399
1400 /**
1401  * amdgpu_device_check_vm_size - validate the vm size
1402  *
1403  * @adev: amdgpu_device pointer
1404  *
1405  * Validates the vm size in GB specified via module parameter.
1406  * The VM size is the size of the GPU virtual memory space in GB.
1407  */
1408 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1409 {
1410         /* no need to check the default value */
1411         if (amdgpu_vm_size == -1)
1412                 return;
1413
1414         if (amdgpu_vm_size < 1) {
1415                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1416                          amdgpu_vm_size);
1417                 amdgpu_vm_size = -1;
1418         }
1419 }
1420
1421 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 {
1423         struct sysinfo si;
1424         bool is_os_64 = (sizeof(void *) == 8);
1425         uint64_t total_memory;
1426         uint64_t dram_size_seven_GB = 0x1B8000000;
1427         uint64_t dram_size_three_GB = 0xB8000000;
1428
1429         if (amdgpu_smu_memory_pool_size == 0)
1430                 return;
1431
1432         if (!is_os_64) {
1433                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1434                 goto def_value;
1435         }
1436         si_meminfo(&si);
1437         total_memory = (uint64_t)si.totalram * si.mem_unit;
1438
1439         if ((amdgpu_smu_memory_pool_size == 1) ||
1440                 (amdgpu_smu_memory_pool_size == 2)) {
1441                 if (total_memory < dram_size_three_GB)
1442                         goto def_value1;
1443         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1444                 (amdgpu_smu_memory_pool_size == 8)) {
1445                 if (total_memory < dram_size_seven_GB)
1446                         goto def_value1;
1447         } else {
1448                 DRM_WARN("Smu memory pool size not supported\n");
1449                 goto def_value;
1450         }
1451         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1452
1453         return;
1454
1455 def_value1:
1456         DRM_WARN("No enough system memory\n");
1457 def_value:
1458         adev->pm.smu_prv_buffer_size = 0;
1459 }
1460
1461 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1462 {
1463         if (!(adev->flags & AMD_IS_APU) ||
1464             adev->asic_type < CHIP_RAVEN)
1465                 return 0;
1466
1467         switch (adev->asic_type) {
1468         case CHIP_RAVEN:
1469                 if (adev->pdev->device == 0x15dd)
1470                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1471                 if (adev->pdev->device == 0x15d8)
1472                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1473                 break;
1474         case CHIP_RENOIR:
1475                 if ((adev->pdev->device == 0x1636) ||
1476                     (adev->pdev->device == 0x164c))
1477                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1478                 else
1479                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1480                 break;
1481         case CHIP_VANGOGH:
1482                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1483                 break;
1484         case CHIP_YELLOW_CARP:
1485                 break;
1486         case CHIP_CYAN_SKILLFISH:
1487                 if ((adev->pdev->device == 0x13FE) ||
1488                     (adev->pdev->device == 0x143F))
1489                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1490                 break;
1491         default:
1492                 break;
1493         }
1494
1495         return 0;
1496 }
1497
1498 /**
1499  * amdgpu_device_check_arguments - validate module params
1500  *
1501  * @adev: amdgpu_device pointer
1502  *
1503  * Validates certain module parameters and updates
1504  * the associated values used by the driver (all asics).
1505  */
1506 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1507 {
1508         if (amdgpu_sched_jobs < 4) {
1509                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1510                          amdgpu_sched_jobs);
1511                 amdgpu_sched_jobs = 4;
1512         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1513                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1514                          amdgpu_sched_jobs);
1515                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1516         }
1517
1518         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1519                 /* gart size must be greater or equal to 32M */
1520                 dev_warn(adev->dev, "gart size (%d) too small\n",
1521                          amdgpu_gart_size);
1522                 amdgpu_gart_size = -1;
1523         }
1524
1525         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1526                 /* gtt size must be greater or equal to 32M */
1527                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1528                                  amdgpu_gtt_size);
1529                 amdgpu_gtt_size = -1;
1530         }
1531
1532         /* valid range is between 4 and 9 inclusive */
1533         if (amdgpu_vm_fragment_size != -1 &&
1534             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1535                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1536                 amdgpu_vm_fragment_size = -1;
1537         }
1538
1539         if (amdgpu_sched_hw_submission < 2) {
1540                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1541                          amdgpu_sched_hw_submission);
1542                 amdgpu_sched_hw_submission = 2;
1543         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1544                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1545                          amdgpu_sched_hw_submission);
1546                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1547         }
1548
1549         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1550                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1551                 amdgpu_reset_method = -1;
1552         }
1553
1554         amdgpu_device_check_smu_prv_buffer_size(adev);
1555
1556         amdgpu_device_check_vm_size(adev);
1557
1558         amdgpu_device_check_block_size(adev);
1559
1560         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1561
1562         return 0;
1563 }
1564
1565 /**
1566  * amdgpu_switcheroo_set_state - set switcheroo state
1567  *
1568  * @pdev: pci dev pointer
1569  * @state: vga_switcheroo state
1570  *
1571  * Callback for the switcheroo driver.  Suspends or resumes the
1572  * asics before or after it is powered up using ACPI methods.
1573  */
1574 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1575                                         enum vga_switcheroo_state state)
1576 {
1577         struct drm_device *dev = pci_get_drvdata(pdev);
1578         int r;
1579
1580         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1581                 return;
1582
1583         if (state == VGA_SWITCHEROO_ON) {
1584                 pr_info("switched on\n");
1585                 /* don't suspend or resume card normally */
1586                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1587
1588                 pci_set_power_state(pdev, PCI_D0);
1589                 amdgpu_device_load_pci_state(pdev);
1590                 r = pci_enable_device(pdev);
1591                 if (r)
1592                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1593                 amdgpu_device_resume(dev, true);
1594
1595                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1596         } else {
1597                 pr_info("switched off\n");
1598                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1599                 amdgpu_device_suspend(dev, true);
1600                 amdgpu_device_cache_pci_state(pdev);
1601                 /* Shut down the device */
1602                 pci_disable_device(pdev);
1603                 pci_set_power_state(pdev, PCI_D3cold);
1604                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1605         }
1606 }
1607
1608 /**
1609  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1610  *
1611  * @pdev: pci dev pointer
1612  *
1613  * Callback for the switcheroo driver.  Checks if the switcheroo
1614  * state can be changed.
1615  * Returns true if the state can be changed, false if not.
1616  */
1617 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1618 {
1619         struct drm_device *dev = pci_get_drvdata(pdev);
1620
1621         /*
1622          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1623          * locking inversion with the driver load path. And the access here is
1624          * completely racy anyway. So don't bother with locking for now.
1625          */
1626         return atomic_read(&dev->open_count) == 0;
1627 }
1628
1629 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1630         .set_gpu_state = amdgpu_switcheroo_set_state,
1631         .reprobe = NULL,
1632         .can_switch = amdgpu_switcheroo_can_switch,
1633 };
1634
1635 /**
1636  * amdgpu_device_ip_set_clockgating_state - set the CG state
1637  *
1638  * @dev: amdgpu_device pointer
1639  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1640  * @state: clockgating state (gate or ungate)
1641  *
1642  * Sets the requested clockgating state for all instances of
1643  * the hardware IP specified.
1644  * Returns the error code from the last instance.
1645  */
1646 int amdgpu_device_ip_set_clockgating_state(void *dev,
1647                                            enum amd_ip_block_type block_type,
1648                                            enum amd_clockgating_state state)
1649 {
1650         struct amdgpu_device *adev = dev;
1651         int i, r = 0;
1652
1653         for (i = 0; i < adev->num_ip_blocks; i++) {
1654                 if (!adev->ip_blocks[i].status.valid)
1655                         continue;
1656                 if (adev->ip_blocks[i].version->type != block_type)
1657                         continue;
1658                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1659                         continue;
1660                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1661                         (void *)adev, state);
1662                 if (r)
1663                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1664                                   adev->ip_blocks[i].version->funcs->name, r);
1665         }
1666         return r;
1667 }
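
/*
 * Example (illustrative sketch, not taken from a specific caller): an IP
 * driver holding a valid amdgpu_device pointer could force-ungate GFX
 * clockgating like this:
 *
 *     r = amdgpu_device_ip_set_clockgating_state(adev,
 *                                                AMD_IP_BLOCK_TYPE_GFX,
 *                                                AMD_CG_STATE_UNGATE);
 *     if (r)
 *             DRM_ERROR("failed to ungate GFX CG (%d)\n", r);
 */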
1668
1669 /**
1670  * amdgpu_device_ip_set_powergating_state - set the PG state
1671  *
1672  * @dev: amdgpu_device pointer
1673  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1674  * @state: powergating state (gate or ungate)
1675  *
1676  * Sets the requested powergating state for all instances of
1677  * the hardware IP specified.
1678  * Returns the error code from the last instance.
1679  */
1680 int amdgpu_device_ip_set_powergating_state(void *dev,
1681                                            enum amd_ip_block_type block_type,
1682                                            enum amd_powergating_state state)
1683 {
1684         struct amdgpu_device *adev = dev;
1685         int i, r = 0;
1686
1687         for (i = 0; i < adev->num_ip_blocks; i++) {
1688                 if (!adev->ip_blocks[i].status.valid)
1689                         continue;
1690                 if (adev->ip_blocks[i].version->type != block_type)
1691                         continue;
1692                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1693                         continue;
1694                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1695                         (void *)adev, state);
1696                 if (r)
1697                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1698                                   adev->ip_blocks[i].version->funcs->name, r);
1699         }
1700         return r;
1701 }
1702
1703 /**
1704  * amdgpu_device_ip_get_clockgating_state - get the CG state
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @flags: clockgating feature flags
1708  *
1709  * Walks the list of IPs on the device and updates the clockgating
1710  * flags for each IP.
1711  * Updates @flags with the feature flags for each hardware IP where
1712  * clockgating is enabled.
1713  */
1714 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1715                                             u64 *flags)
1716 {
1717         int i;
1718
1719         for (i = 0; i < adev->num_ip_blocks; i++) {
1720                 if (!adev->ip_blocks[i].status.valid)
1721                         continue;
1722                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1723                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1724         }
1725 }
1726
1727 /**
1728  * amdgpu_device_ip_wait_for_idle - wait for idle
1729  *
1730  * @adev: amdgpu_device pointer
1731  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1732  *
1733  * Waits for the requested hardware IP to be idle.
1734  * Returns 0 for success or a negative error code on failure.
1735  */
1736 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1737                                    enum amd_ip_block_type block_type)
1738 {
1739         int i, r;
1740
1741         for (i = 0; i < adev->num_ip_blocks; i++) {
1742                 if (!adev->ip_blocks[i].status.valid)
1743                         continue;
1744                 if (adev->ip_blocks[i].version->type == block_type) {
1745                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1746                         if (r)
1747                                 return r;
1748                         break;
1749                 }
1750         }
1751         return 0;
1752
1753 }
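
/*
 * Example (illustrative sketch): a hypothetical caller that needs the GFX
 * block quiescent before touching its registers might do:
 *
 *     r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 *     if (r)
 *             return r;
 */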
1754
1755 /**
1756  * amdgpu_device_ip_is_idle - is the hardware IP idle
1757  *
1758  * @adev: amdgpu_device pointer
1759  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1760  *
1761  * Check if the hardware IP is idle or not.
1762  * Returns true if the IP is idle, false if not.
1763  */
1764 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1765                               enum amd_ip_block_type block_type)
1766 {
1767         int i;
1768
1769         for (i = 0; i < adev->num_ip_blocks; i++) {
1770                 if (!adev->ip_blocks[i].status.valid)
1771                         continue;
1772                 if (adev->ip_blocks[i].version->type == block_type)
1773                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1774         }
1775         return true;
1776
1777 }
1778
1779 /**
1780  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1781  *
1782  * @adev: amdgpu_device pointer
1783  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1784  *
1785  * Returns a pointer to the hardware IP block structure
1786  * if it exists for the asic, otherwise NULL.
1787  */
1788 struct amdgpu_ip_block *
1789 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1790                               enum amd_ip_block_type type)
1791 {
1792         int i;
1793
1794         for (i = 0; i < adev->num_ip_blocks; i++)
1795                 if (adev->ip_blocks[i].version->type == type)
1796                         return &adev->ip_blocks[i];
1797
1798         return NULL;
1799 }
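
/*
 * Example (illustrative sketch): querying the installed GFX IP block and
 * logging its version; the returned pointer may be NULL if the asic has
 * no such IP:
 *
 *     struct amdgpu_ip_block *ip =
 *             amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *     if (ip)
 *             DRM_INFO("GFX IP v%d.%d\n", ip->version->major,
 *                      ip->version->minor);
 */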
1800
1801 /**
1802  * amdgpu_device_ip_block_version_cmp
1803  *
1804  * @adev: amdgpu_device pointer
1805  * @type: enum amd_ip_block_type
1806  * @major: major version
1807  * @minor: minor version
1808  *
1809  * Returns 0 if the IP block's version is equal to or greater than the
1810  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1811  */
1812 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1813                                        enum amd_ip_block_type type,
1814                                        u32 major, u32 minor)
1815 {
1816         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1817
1818         if (ip_block && ((ip_block->version->major > major) ||
1819                         ((ip_block->version->major == major) &&
1820                         (ip_block->version->minor >= minor))))
1821                 return 0;
1822
1823         return 1;
1824 }
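
/*
 * Example (illustrative sketch): gating a code path on a minimum SMC IP
 * version; note the inverted return convention (0 means "new enough"):
 *
 *     bool smc_11_or_newer =
 *             !amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *                                                 11, 0);
 */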
1825
1826 /**
1827  * amdgpu_device_ip_block_add
1828  *
1829  * @adev: amdgpu_device pointer
1830  * @ip_block_version: pointer to the IP to add
1831  *
1832  * Adds the IP block driver information to the collection of IPs
1833  * on the asic.
1834  */
1835 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1836                                const struct amdgpu_ip_block_version *ip_block_version)
1837 {
1838         if (!ip_block_version)
1839                 return -EINVAL;
1840
1841         switch (ip_block_version->type) {
1842         case AMD_IP_BLOCK_TYPE_VCN:
1843                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1844                         return 0;
1845                 break;
1846         case AMD_IP_BLOCK_TYPE_JPEG:
1847                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1848                         return 0;
1849                 break;
1850         default:
1851                 break;
1852         }
1853
1854         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1855                   ip_block_version->funcs->name);
1856
1857         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1858
1859         return 0;
1860 }
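
/*
 * Example (illustrative sketch): soc-specific *_set_ip_blocks() helpers
 * register their IPs through this function; a soc15-style init path could
 * add the common block like this (the block name is used here purely for
 * illustration):
 *
 *     r = amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 *     if (r)
 *             return r;
 */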
1861
1862 /**
1863  * amdgpu_device_enable_virtual_display - enable virtual display feature
1864  *
1865  * @adev: amdgpu_device pointer
1866  *
1867  * Enables the virtual display feature if the user has enabled it via
1868  * the module parameter virtual_display.  This feature provides virtual
1869  * display hardware on headless boards or in virtualized environments.
1870  * This function parses and validates the configuration string specified by
1871  * the user and configures the virtual display configuration (number of
1872  * virtual connectors, crtcs, etc.) specified.
1873  */
1874 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1875 {
1876         adev->enable_virtual_display = false;
1877
1878         if (amdgpu_virtual_display) {
1879                 const char *pci_address_name = pci_name(adev->pdev);
1880                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1881
1882                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1883                 pciaddstr_tmp = pciaddstr;
1884                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1885                         pciaddname = strsep(&pciaddname_tmp, ",");
1886                         if (!strcmp("all", pciaddname)
1887                             || !strcmp(pci_address_name, pciaddname)) {
1888                                 long num_crtc;
1889                                 int res = -1;
1890
1891                                 adev->enable_virtual_display = true;
1892
1893                                 if (pciaddname_tmp)
1894                                         res = kstrtol(pciaddname_tmp, 10,
1895                                                       &num_crtc);
1896
1897                                 if (!res) {
1898                                         if (num_crtc < 1)
1899                                                 num_crtc = 1;
1900                                         if (num_crtc > 6)
1901                                                 num_crtc = 6;
1902                                         adev->mode_info.num_crtc = num_crtc;
1903                                 } else {
1904                                         adev->mode_info.num_crtc = 1;
1905                                 }
1906                                 break;
1907                         }
1908                 }
1909
1910                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1911                          amdgpu_virtual_display, pci_address_name,
1912                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1913
1914                 kfree(pciaddstr);
1915         }
1916 }
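
/*
 * Example (illustrative, the PCI address below is made up): with the
 * parsing above, booting with
 *
 *     amdgpu.virtual_display=0000:03:00.0,2
 *
 * enables virtual display with two crtcs on the device at PCI address
 * 0000:03:00.0, while "all,1" would enable it with one crtc on every
 * amdgpu device.
 */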
1917
1918 /**
1919  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1920  *
1921  * @adev: amdgpu_device pointer
1922  *
1923  * Parses the asic configuration parameters specified in the gpu info
1924  * firmware and makes them available to the driver for use in configuring
1925  * the asic.
1926  * Returns 0 on success, -EINVAL on failure.
1927  */
1928 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1929 {
1930         const char *chip_name;
1931         char fw_name[40];
1932         int err;
1933         const struct gpu_info_firmware_header_v1_0 *hdr;
1934
1935         adev->firmware.gpu_info_fw = NULL;
1936
1937         if (adev->mman.discovery_bin) {
1938                 /*
1939                  * FIXME: The bounding box is still needed by Navi12, so
1940                  * temporarily read it from gpu_info firmware. Should be dropped
1941                  * when DAL no longer needs it.
1942                  */
1943                 if (adev->asic_type != CHIP_NAVI12)
1944                         return 0;
1945         }
1946
1947         switch (adev->asic_type) {
1948         default:
1949                 return 0;
1950         case CHIP_VEGA10:
1951                 chip_name = "vega10";
1952                 break;
1953         case CHIP_VEGA12:
1954                 chip_name = "vega12";
1955                 break;
1956         case CHIP_RAVEN:
1957                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1958                         chip_name = "raven2";
1959                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1960                         chip_name = "picasso";
1961                 else
1962                         chip_name = "raven";
1963                 break;
1964         case CHIP_ARCTURUS:
1965                 chip_name = "arcturus";
1966                 break;
1967         case CHIP_NAVI12:
1968                 chip_name = "navi12";
1969                 break;
1970         }
1971
1972         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1973         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1974         if (err) {
1975                 dev_err(adev->dev,
1976                         "Failed to load gpu_info firmware \"%s\"\n",
1977                         fw_name);
1978                 goto out;
1979         }
1980         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1981         if (err) {
1982                 dev_err(adev->dev,
1983                         "Failed to validate gpu_info firmware \"%s\"\n",
1984                         fw_name);
1985                 goto out;
1986         }
1987
1988         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1989         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1990
1991         switch (hdr->version_major) {
1992         case 1:
1993         {
1994                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1995                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1996                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1997
1998                 /*
1999                  * Should be dropped when DAL no longer needs it.
2000                  */
2001                 if (adev->asic_type == CHIP_NAVI12)
2002                         goto parse_soc_bounding_box;
2003
2004                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2005                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2006                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2007                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2008                 adev->gfx.config.max_texture_channel_caches =
2009                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2010                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2011                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2012                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2013                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2014                 adev->gfx.config.double_offchip_lds_buf =
2015                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2016                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2017                 adev->gfx.cu_info.max_waves_per_simd =
2018                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2019                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2020                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2021                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2022                 if (hdr->version_minor >= 1) {
2023                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2024                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2025                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2026                         adev->gfx.config.num_sc_per_sh =
2027                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2028                         adev->gfx.config.num_packer_per_sc =
2029                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2030                 }
2031
2032 parse_soc_bounding_box:
2033                 /*
2034                  * soc bounding box info is not integrated in discovery table,
2035                  * so we still need to parse it from gpu info firmware when needed.
2036                  */
2037                 if (hdr->version_minor == 2) {
2038                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2039                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2040                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2041                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2042                 }
2043                 break;
2044         }
2045         default:
2046                 dev_err(adev->dev,
2047                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2048                 err = -EINVAL;
2049                 goto out;
2050         }
2051 out:
2052         return err;
2053 }
2054
2055 /**
2056  * amdgpu_device_ip_early_init - run early init for hardware IPs
2057  *
2058  * @adev: amdgpu_device pointer
2059  *
2060  * Early initialization pass for hardware IPs.  The hardware IPs that make
2061  * up each asic are discovered and each IP's early_init callback is run.  This
2062  * is the first stage in initializing the asic.
2063  * Returns 0 on success, negative error code on failure.
2064  */
2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2066 {
2067         struct drm_device *dev = adev_to_drm(adev);
2068         struct pci_dev *parent;
2069         int i, r;
2070
2071         amdgpu_device_enable_virtual_display(adev);
2072
2073         if (amdgpu_sriov_vf(adev)) {
2074                 r = amdgpu_virt_request_full_gpu(adev, true);
2075                 if (r)
2076                         return r;
2077         }
2078
2079         switch (adev->asic_type) {
2080 #ifdef CONFIG_DRM_AMDGPU_SI
2081         case CHIP_VERDE:
2082         case CHIP_TAHITI:
2083         case CHIP_PITCAIRN:
2084         case CHIP_OLAND:
2085         case CHIP_HAINAN:
2086                 adev->family = AMDGPU_FAMILY_SI;
2087                 r = si_set_ip_blocks(adev);
2088                 if (r)
2089                         return r;
2090                 break;
2091 #endif
2092 #ifdef CONFIG_DRM_AMDGPU_CIK
2093         case CHIP_BONAIRE:
2094         case CHIP_HAWAII:
2095         case CHIP_KAVERI:
2096         case CHIP_KABINI:
2097         case CHIP_MULLINS:
2098                 if (adev->flags & AMD_IS_APU)
2099                         adev->family = AMDGPU_FAMILY_KV;
2100                 else
2101                         adev->family = AMDGPU_FAMILY_CI;
2102
2103                 r = cik_set_ip_blocks(adev);
2104                 if (r)
2105                         return r;
2106                 break;
2107 #endif
2108         case CHIP_TOPAZ:
2109         case CHIP_TONGA:
2110         case CHIP_FIJI:
2111         case CHIP_POLARIS10:
2112         case CHIP_POLARIS11:
2113         case CHIP_POLARIS12:
2114         case CHIP_VEGAM:
2115         case CHIP_CARRIZO:
2116         case CHIP_STONEY:
2117                 if (adev->flags & AMD_IS_APU)
2118                         adev->family = AMDGPU_FAMILY_CZ;
2119                 else
2120                         adev->family = AMDGPU_FAMILY_VI;
2121
2122                 r = vi_set_ip_blocks(adev);
2123                 if (r)
2124                         return r;
2125                 break;
2126         default:
2127                 r = amdgpu_discovery_set_ip_blocks(adev);
2128                 if (r)
2129                         return r;
2130                 break;
2131         }
2132
2133         if (amdgpu_has_atpx() &&
2134             (amdgpu_is_atpx_hybrid() ||
2135              amdgpu_has_atpx_dgpu_power_cntl()) &&
2136             ((adev->flags & AMD_IS_APU) == 0) &&
2137             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2138                 adev->flags |= AMD_IS_PX;
2139
2140         if (!(adev->flags & AMD_IS_APU)) {
2141                 parent = pci_upstream_bridge(adev->pdev);
2142                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2143         }
2144
2145         amdgpu_amdkfd_device_probe(adev);
2146
2147         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2148         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2149                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2150         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2151                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2152
2153         for (i = 0; i < adev->num_ip_blocks; i++) {
2154                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2155                         DRM_ERROR("disabled ip block: %d <%s>\n",
2156                                   i, adev->ip_blocks[i].version->funcs->name);
2157                         adev->ip_blocks[i].status.valid = false;
2158                 } else {
2159                         if (adev->ip_blocks[i].version->funcs->early_init) {
2160                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2161                                 if (r == -ENOENT) {
2162                                         adev->ip_blocks[i].status.valid = false;
2163                                 } else if (r) {
2164                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2165                                                   adev->ip_blocks[i].version->funcs->name, r);
2166                                         return r;
2167                                 } else {
2168                                         adev->ip_blocks[i].status.valid = true;
2169                                 }
2170                         } else {
2171                                 adev->ip_blocks[i].status.valid = true;
2172                         }
2173                 }
2174                 /* get the vbios after the asic_funcs are set up */
2175                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2176                         r = amdgpu_device_parse_gpu_info_fw(adev);
2177                         if (r)
2178                                 return r;
2179
2180                         /* Read BIOS */
2181                         if (!amdgpu_get_bios(adev))
2182                                 return -EINVAL;
2183
2184                         r = amdgpu_atombios_init(adev);
2185                         if (r) {
2186                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2187                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2188                                 return r;
2189                         }
2190
2191                         /* get pf2vf msg info at its earliest time */
2192                         if (amdgpu_sriov_vf(adev))
2193                                 amdgpu_virt_init_data_exchange(adev);
2194
2195                 }
2196         }
2197
2198         adev->cg_flags &= amdgpu_cg_mask;
2199         adev->pg_flags &= amdgpu_pg_mask;
2200
2201         return 0;
2202 }
2203
2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2205 {
2206         int i, r;
2207
2208         for (i = 0; i < adev->num_ip_blocks; i++) {
2209                 if (!adev->ip_blocks[i].status.sw)
2210                         continue;
2211                 if (adev->ip_blocks[i].status.hw)
2212                         continue;
2213                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2214                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2215                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2216                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2217                         if (r) {
2218                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2219                                           adev->ip_blocks[i].version->funcs->name, r);
2220                                 return r;
2221                         }
2222                         adev->ip_blocks[i].status.hw = true;
2223                 }
2224         }
2225
2226         return 0;
2227 }
2228
2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2230 {
2231         int i, r;
2232
2233         for (i = 0; i < adev->num_ip_blocks; i++) {
2234                 if (!adev->ip_blocks[i].status.sw)
2235                         continue;
2236                 if (adev->ip_blocks[i].status.hw)
2237                         continue;
2238                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2239                 if (r) {
2240                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2241                                   adev->ip_blocks[i].version->funcs->name, r);
2242                         return r;
2243                 }
2244                 adev->ip_blocks[i].status.hw = true;
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2251 {
2252         int r = 0;
2253         int i;
2254         uint32_t smu_version;
2255
2256         if (adev->asic_type >= CHIP_VEGA10) {
2257                 for (i = 0; i < adev->num_ip_blocks; i++) {
2258                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2259                                 continue;
2260
2261                         if (!adev->ip_blocks[i].status.sw)
2262                                 continue;
2263
2264                         /* no need to do the fw loading again if already done */
2265                         if (adev->ip_blocks[i].status.hw == true)
2266                                 break;
2267
2268                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2269                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2270                                 if (r) {
2271                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2272                                                           adev->ip_blocks[i].version->funcs->name, r);
2273                                         return r;
2274                                 }
2275                         } else {
2276                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2277                                 if (r) {
2278                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2279                                                           adev->ip_blocks[i].version->funcs->name, r);
2280                                         return r;
2281                                 }
2282                         }
2283
2284                         adev->ip_blocks[i].status.hw = true;
2285                         break;
2286                 }
2287         }
2288
2289         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2290                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2291
2292         return r;
2293 }
2294
2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2296 {
2297         long timeout;
2298         int r, i;
2299
2300         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2301                 struct amdgpu_ring *ring = adev->rings[i];
2302
2303                 /* No need to set up the GPU scheduler for rings that don't need it */
2304                 if (!ring || ring->no_scheduler)
2305                         continue;
2306
2307                 switch (ring->funcs->type) {
2308                 case AMDGPU_RING_TYPE_GFX:
2309                         timeout = adev->gfx_timeout;
2310                         break;
2311                 case AMDGPU_RING_TYPE_COMPUTE:
2312                         timeout = adev->compute_timeout;
2313                         break;
2314                 case AMDGPU_RING_TYPE_SDMA:
2315                         timeout = adev->sdma_timeout;
2316                         break;
2317                 default:
2318                         timeout = adev->video_timeout;
2319                         break;
2320                 }
2321
2322                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2323                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2324                                    timeout, adev->reset_domain->wq,
2325                                    ring->sched_score, ring->name,
2326                                    adev->dev);
2327                 if (r) {
2328                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2329                                   ring->name);
2330                         return r;
2331                 }
2332         }
2333
2334         return 0;
2335 }
2336
2337
2338 /**
2339  * amdgpu_device_ip_init - run init for hardware IPs
2340  *
2341  * @adev: amdgpu_device pointer
2342  *
2343  * Main initialization pass for hardware IPs.  The list of all the hardware
2344  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2345  * are run.  sw_init initializes the software state associated with each IP
2346  * and hw_init initializes the hardware associated with each IP.
2347  * Returns 0 on success, negative error code on failure.
2348  */
2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2350 {
2351         int i, r;
2352
2353         r = amdgpu_ras_init(adev);
2354         if (r)
2355                 return r;
2356
2357         for (i = 0; i < adev->num_ip_blocks; i++) {
2358                 if (!adev->ip_blocks[i].status.valid)
2359                         continue;
2360                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2361                 if (r) {
2362                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2363                                   adev->ip_blocks[i].version->funcs->name, r);
2364                         goto init_failed;
2365                 }
2366                 adev->ip_blocks[i].status.sw = true;
2367
2368                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2369                         /* need to do common hw init early so everything is set up for gmc */
2370                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2371                         if (r) {
2372                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2373                                 goto init_failed;
2374                         }
2375                         adev->ip_blocks[i].status.hw = true;
2376                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2377                         /* need to do gmc hw init early so we can allocate gpu mem */
2378                         /* Try to reserve bad pages early */
2379                         if (amdgpu_sriov_vf(adev))
2380                                 amdgpu_virt_exchange_data(adev);
2381
2382                         r = amdgpu_device_vram_scratch_init(adev);
2383                         if (r) {
2384                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2385                                 goto init_failed;
2386                         }
2387                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2388                         if (r) {
2389                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2390                                 goto init_failed;
2391                         }
2392                         r = amdgpu_device_wb_init(adev);
2393                         if (r) {
2394                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2395                                 goto init_failed;
2396                         }
2397                         adev->ip_blocks[i].status.hw = true;
2398
2399                         /* right after GMC hw init, we create CSA */
2400                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2401                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2402                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2403                                                                 AMDGPU_CSA_SIZE);
2404                                 if (r) {
2405                                         DRM_ERROR("allocate CSA failed %d\n", r);
2406                                         goto init_failed;
2407                                 }
2408                         }
2409                 }
2410         }
2411
2412         if (amdgpu_sriov_vf(adev))
2413                 amdgpu_virt_init_data_exchange(adev);
2414
2415         r = amdgpu_ib_pool_init(adev);
2416         if (r) {
2417                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2418                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2419                 goto init_failed;
2420         }
2421
2422         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */
2423         if (r)
2424                 goto init_failed;
2425
2426         r = amdgpu_device_ip_hw_init_phase1(adev);
2427         if (r)
2428                 goto init_failed;
2429
2430         r = amdgpu_device_fw_loading(adev);
2431         if (r)
2432                 goto init_failed;
2433
2434         r = amdgpu_device_ip_hw_init_phase2(adev);
2435         if (r)
2436                 goto init_failed;
2437
2438         /*
2439          * Retired pages will be loaded from eeprom and reserved here.
2440          * This should be called after amdgpu_device_ip_hw_init_phase2 since
2441          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2442          * functional for I2C communication, which is only true at this point.
2443          *
2444          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2445          * about failures caused by a bad gpu situation and stop the amdgpu
2446          * init process accordingly. For other failure cases, it still releases
2447          * all the resources and prints an error message rather than returning
2448          * a negative value to the upper level.
2449          *
2450          * Note: theoretically, this should be called before all vram allocations
2451          * to protect retired pages from being reused.
2452          */
2453         r = amdgpu_ras_recovery_init(adev);
2454         if (r)
2455                 goto init_failed;
2456
2457         /*
2458          * In case of XGMI, grab an extra reference to the reset domain for this device
2459          */
2460         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2461                 if (amdgpu_xgmi_add_device(adev) == 0) {
2462                         if (!amdgpu_sriov_vf(adev)) {
2463                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2464
2465                                 if (!hive->reset_domain ||
2466                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2467                                         r = -ENOENT;
2468                                         amdgpu_put_xgmi_hive(hive);
2469                                         goto init_failed;
2470                                 }
2471
2472                                 /* Drop the early temporary reset domain we created for device */
2473                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2474                                 adev->reset_domain = hive->reset_domain;
2475                                 amdgpu_put_xgmi_hive(hive);
2476                         }
2477                 }
2478         }
2479
2480         r = amdgpu_device_init_schedulers(adev);
2481         if (r)
2482                 goto init_failed;
2483
2484         /* Don't init kfd if whole hive need to be reset during init */
2485         if (!adev->gmc.xgmi.pending_reset)
2486                 amdgpu_amdkfd_device_init(adev);
2487
2488         amdgpu_fru_get_product_info(adev);
2489
2490 init_failed:
2491         if (amdgpu_sriov_vf(adev))
2492                 amdgpu_virt_release_full_gpu(adev, true);
2493
2494         return r;
2495 }
2496
2497 /**
2498  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2499  *
2500  * @adev: amdgpu_device pointer
2501  *
2502  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2503  * this function before a GPU reset.  If the value is retained after a
2504  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2505  */
2506 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2507 {
2508         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2509 }
2510
2511 /**
2512  * amdgpu_device_check_vram_lost - check if vram is valid
2513  *
2514  * @adev: amdgpu_device pointer
2515  *
2516  * Checks the reset magic value written to the gart pointer in VRAM.
2517  * The driver calls this after a GPU reset to see if the contents of
2518  * VRAM are lost or not.
2519  * Returns true if vram is lost, false if not.
2520  */
2521 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2522 {
2523         if (memcmp(adev->gart.ptr, adev->reset_magic,
2524                         AMDGPU_RESET_MAGIC_NUM))
2525                 return true;
2526
2527         if (!amdgpu_in_reset(adev))
2528                 return false;
2529
2530         /*
2531          * For all ASICs with baco/mode1 reset, the VRAM is
2532          * always assumed to be lost.
2533          */
2534         switch (amdgpu_asic_reset_method(adev)) {
2535         case AMD_RESET_METHOD_BACO:
2536         case AMD_RESET_METHOD_MODE1:
2537                 return true;
2538         default:
2539                 return false;
2540         }
2541 }
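
/*
 * Example (illustrative sketch): the reset path pairs this check with
 * amdgpu_device_fill_reset_magic(); a simplified sketch of how a reset
 * flow could use them:
 *
 *     amdgpu_device_fill_reset_magic(adev);
 *     ... perform the GPU reset ...
 *     vram_lost = amdgpu_device_check_vram_lost(adev);
 *     if (vram_lost)
 *             DRM_INFO("VRAM is lost due to GPU reset!\n");
 */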
2542
2543 /**
2544  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2545  *
2546  * @adev: amdgpu_device pointer
2547  * @state: clockgating state (gate or ungate)
2548  *
2549  * The list of all the hardware IPs that make up the asic is walked and the
2550  * set_clockgating_state callbacks are run.
2551  * During the late init pass, clockgating is enabled for the hardware IPs;
2552  * during the fini or suspend pass, it is disabled.
2553  * Returns 0 on success, negative error code on failure.
2554  */
2555
2556 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557                                enum amd_clockgating_state state)
2558 {
2559         int i, j, r;
2560
2561         if (amdgpu_emu_mode == 1)
2562                 return 0;
2563
2564         for (j = 0; j < adev->num_ip_blocks; j++) {
2565                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566                 if (!adev->ip_blocks[i].status.late_initialized)
2567                         continue;
2568                 /* skip CG for GFX on S0ix */
2569                 if (adev->in_s0ix &&
2570                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2571                         continue;
2572                 /* skip CG for VCE/UVD, it's handled specially */
2573                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2578                         /* enable clockgating to save power */
2579                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2580                                                                                      state);
2581                         if (r) {
2582                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2583                                           adev->ip_blocks[i].version->funcs->name, r);
2584                                 return r;
2585                         }
2586                 }
2587         }
2588
2589         return 0;
2590 }
2591
2592 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2593                                enum amd_powergating_state state)
2594 {
2595         int i, j, r;
2596
2597         if (amdgpu_emu_mode == 1)
2598                 return 0;
2599
2600         for (j = 0; j < adev->num_ip_blocks; j++) {
2601                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2602                 if (!adev->ip_blocks[i].status.late_initialized)
2603                         continue;
2604                 /* skip PG for GFX on S0ix */
2605                 if (adev->in_s0ix &&
2606                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2607                         continue;
2608                 /* skip PG for VCE/UVD, it's handled specially */
2609                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2610                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2611                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2612                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2613                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2614                         /* enable powergating to save power */
2615                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2616                                                                                         state);
2617                         if (r) {
2618                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2619                                           adev->ip_blocks[i].version->funcs->name, r);
2620                                 return r;
2621                         }
2622                 }
2623         }
2624         return 0;
2625 }
2626
2627 static int amdgpu_device_enable_mgpu_fan_boost(void)
2628 {
2629         struct amdgpu_gpu_instance *gpu_ins;
2630         struct amdgpu_device *adev;
2631         int i, ret = 0;
2632
2633         mutex_lock(&mgpu_info.mutex);
2634
2635         /*
2636          * MGPU fan boost feature should be enabled
2637          * only when there are two or more dGPUs in
2638          * the system
2639          */
2640         if (mgpu_info.num_dgpu < 2)
2641                 goto out;
2642
2643         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2644                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2645                 adev = gpu_ins->adev;
2646                 if (!(adev->flags & AMD_IS_APU) &&
2647                     !gpu_ins->mgpu_fan_enabled) {
2648                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2649                         if (ret)
2650                                 break;
2651
2652                         gpu_ins->mgpu_fan_enabled = 1;
2653                 }
2654         }
2655
2656 out:
2657         mutex_unlock(&mgpu_info.mutex);
2658
2659         return ret;
2660 }
2661
2662 /**
2663  * amdgpu_device_ip_late_init - run late init for hardware IPs
2664  *
2665  * @adev: amdgpu_device pointer
2666  *
2667  * Late initialization pass for hardware IPs.  The list of all the hardware
2668  * IPs that make up the asic is walked and the late_init callbacks are run.
2669  * late_init covers any special initialization that an IP requires
2670  * after all of the IPs have been initialized or something that needs to happen
2671  * late in the init process.
2672  * Returns 0 on success, negative error code on failure.
2673  */
2674 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2675 {
2676         struct amdgpu_gpu_instance *gpu_instance;
2677         int i = 0, r;
2678
2679         for (i = 0; i < adev->num_ip_blocks; i++) {
2680                 if (!adev->ip_blocks[i].status.hw)
2681                         continue;
2682                 if (adev->ip_blocks[i].version->funcs->late_init) {
2683                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2684                         if (r) {
2685                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2686                                           adev->ip_blocks[i].version->funcs->name, r);
2687                                 return r;
2688                         }
2689                 }
2690                 adev->ip_blocks[i].status.late_initialized = true;
2691         }
2692
2693         r = amdgpu_ras_late_init(adev);
2694         if (r) {
2695                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2696                 return r;
2697         }
2698
2699         amdgpu_ras_set_error_query_ready(adev, true);
2700
2701         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703
2704         amdgpu_device_fill_reset_magic(adev);
2705
2706         r = amdgpu_device_enable_mgpu_fan_boost();
2707         if (r)
2708                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2709
2710         /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
2711         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2712                                adev->asic_type == CHIP_ALDEBARAN))
2713                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2714
2715         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2716                 mutex_lock(&mgpu_info.mutex);
2717
2718                 /*
2719                  * Reset device p-state to low as this was booted with high.
2720                  *
2721                  * This should be performed only after all devices from the same
2722                  * hive get initialized.
2723                  *
2724                  * However, the number of devices in the hive is not known in
2725                  * advance, as it is counted one by one during device initialization.
2726                  *
2727                  * So we wait until all XGMI interlinked devices are initialized.
2728                  * This may bring some delay as those devices may come from
2729                  * different hives. But that should be OK.
2730                  */
2731                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2732                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2733                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2734                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2735                                         continue;
2736
2737                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2738                                                 AMDGPU_XGMI_PSTATE_MIN);
2739                                 if (r) {
2740                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2741                                         break;
2742                                 }
2743                         }
2744                 }
2745
2746                 mutex_unlock(&mgpu_info.mutex);
2747         }
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2754  *
2755  * @adev: amdgpu_device pointer
2756  *
2757  * For ASICs that need to disable the SMC first
2758  */
2759 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2760 {
2761         int i, r;
2762
2763         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2764                 return;
2765
2766         for (i = 0; i < adev->num_ip_blocks; i++) {
2767                 if (!adev->ip_blocks[i].status.hw)
2768                         continue;
2769                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2770                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2771                         /* XXX handle errors */
2772                         if (r) {
2773                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2774                                           adev->ip_blocks[i].version->funcs->name, r);
2775                         }
2776                         adev->ip_blocks[i].status.hw = false;
2777                         break;
2778                 }
2779         }
2780 }
2781
2782 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2783 {
2784         int i, r;
2785
2786         for (i = 0; i < adev->num_ip_blocks; i++) {
2787                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2788                         continue;
2789
2790                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2791                 if (r) {
2792                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2793                                   adev->ip_blocks[i].version->funcs->name, r);
2794                 }
2795         }
2796
2797         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2798         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2799
2800         amdgpu_amdkfd_suspend(adev, false);
2801
2802         /* Workaround for ASICs that need to disable the SMC first */
2803         amdgpu_device_smu_fini_early(adev);
2804
2805         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2806                 if (!adev->ip_blocks[i].status.hw)
2807                         continue;
2808
2809                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2810                 /* XXX handle errors */
2811                 if (r) {
2812                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2813                                   adev->ip_blocks[i].version->funcs->name, r);
2814                 }
2815
2816                 adev->ip_blocks[i].status.hw = false;
2817         }
2818
2819         if (amdgpu_sriov_vf(adev)) {
2820                 if (amdgpu_virt_release_full_gpu(adev, false))
2821                         DRM_ERROR("failed to release exclusive mode on fini\n");
2822         }
2823
2824         return 0;
2825 }
2826
2827 /**
2828  * amdgpu_device_ip_fini - run fini for hardware IPs
2829  *
2830  * @adev: amdgpu_device pointer
2831  *
2832  * Main teardown pass for hardware IPs.  The list of all the hardware
2833  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2834  * are run.  hw_fini tears down the hardware associated with each IP
2835  * and sw_fini tears down any software state associated with each IP.
2836  * Returns 0 on success, negative error code on failure.
2837  */
2838 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2839 {
2840         int i, r;
2841
2842         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2843                 amdgpu_virt_release_ras_err_handler_data(adev);
2844
2845         if (adev->gmc.xgmi.num_physical_nodes > 1)
2846                 amdgpu_xgmi_remove_device(adev);
2847
2848         amdgpu_amdkfd_device_fini_sw(adev);
2849
2850         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2851                 if (!adev->ip_blocks[i].status.sw)
2852                         continue;
2853
2854                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2855                         amdgpu_ucode_free_bo(adev);
2856                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2857                         amdgpu_device_wb_fini(adev);
2858                         amdgpu_device_vram_scratch_fini(adev);
2859                         amdgpu_ib_pool_fini(adev);
2860                 }
2861
2862                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2863                 /* XXX handle errors */
2864                 if (r) {
2865                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2866                                   adev->ip_blocks[i].version->funcs->name, r);
2867                 }
2868                 adev->ip_blocks[i].status.sw = false;
2869                 adev->ip_blocks[i].status.valid = false;
2870         }
2871
2872         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2873                 if (!adev->ip_blocks[i].status.late_initialized)
2874                         continue;
2875                 if (adev->ip_blocks[i].version->funcs->late_fini)
2876                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2877                 adev->ip_blocks[i].status.late_initialized = false;
2878         }
2879
2880         amdgpu_ras_fini(adev);
2881
2882         return 0;
2883 }
2884
2885 /**
2886  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2887  *
2888  * @work: work_struct.
2889  */
2890 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2891 {
2892         struct amdgpu_device *adev =
2893                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2894         int r;
2895
2896         r = amdgpu_ib_ring_tests(adev);
2897         if (r)
2898                 DRM_ERROR("ib ring test failed (%d).\n", r);
2899 }
2900
2901 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2902 {
2903         struct amdgpu_device *adev =
2904                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2905
2906         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2907         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2908
2909         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2910                 adev->gfx.gfx_off_state = true;
2911 }
2912
2913 /**
2914  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2915  *
2916  * @adev: amdgpu_device pointer
2917  *
2918  * First phase of the suspend sequence.  Clockgating and powergating are
2919  * ungated and the suspend callbacks are run for the display (DCE) blocks
2920  * only.  suspend puts the hardware and software state of each IP into a
2921  * state suitable for suspend.
2922  * Returns 0 on success, negative error code on failure.
2923  */
2924 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2925 {
2926         int i, r;
2927
2928         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2929         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2930
2931         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2932                 if (!adev->ip_blocks[i].status.valid)
2933                         continue;
2934
2935                 /* displays are handled separately */
2936                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2937                         continue;
2938
2939                 /* XXX handle errors */
2940                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2941                 /* XXX handle errors */
2942                 if (r) {
2943                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2944                                   adev->ip_blocks[i].version->funcs->name, r);
2945                         return r;
2946                 }
2947
2948                 adev->ip_blocks[i].status.hw = false;
2949         }
2950
2951         return 0;
2952 }
2953
2954 /**
2955  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2956  *
2957  * @adev: amdgpu_device pointer
2958  *
2959  * Second phase of the suspend sequence.  The list of all the hardware
2960  * IPs that make up the asic is walked and the suspend callbacks are run
2961  * for every block except the displays, which were handled in phase 1.
2962  * suspend puts the hardware and software state of each IP into a state suitable for suspend.
2963  * Returns 0 on success, negative error code on failure.
2964  */
2965 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2966 {
2967         int i, r;
2968
2969         if (adev->in_s0ix)
2970                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2971
2972         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2973                 if (!adev->ip_blocks[i].status.valid)
2974                         continue;
2975                 /* displays are handled in phase1 */
2976                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2977                         continue;
2978                 /* PSP lost connection when err_event_athub occurs */
2979                 if (amdgpu_ras_intr_triggered() &&
2980                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2981                         adev->ip_blocks[i].status.hw = false;
2982                         continue;
2983                 }
2984
2985                 /* skip unnecessary suspend if we have not initialized them yet */
2986                 if (adev->gmc.xgmi.pending_reset &&
2987                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2988                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2989                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2990                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2991                         adev->ip_blocks[i].status.hw = false;
2992                         continue;
2993                 }
2994
2995                 /* skip suspend of gfx and psp for S0ix
2996                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2997                  * like at runtime. PSP is also part of the always-on hardware,
2998                  * so there is no need to suspend it.
2999                  */
3000                 if (adev->in_s0ix &&
3001                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3002                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3003                         continue;
3004
3005                 /* XXX handle errors */
3006                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3007                 /* XXX handle errors */
3008                 if (r) {
3009                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3010                                   adev->ip_blocks[i].version->funcs->name, r);
3011                 }
3012                 adev->ip_blocks[i].status.hw = false;
3013                 /* handle putting the SMC in the appropriate state */
3014                 if (!amdgpu_sriov_vf(adev)) {
3015                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3016                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3017                                 if (r) {
3018                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3019                                                         adev->mp1_state, r);
3020                                         return r;
3021                                 }
3022                         }
3023                 }
3024         }
3025
3026         return 0;
3027 }
3028
3029 /**
3030  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * Main suspend function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked, clockgating is disabled and the
3036  * suspend callbacks are run.  suspend puts the hardware and software state
3037  * in each IP into a state suitable for suspend.
3038  * Returns 0 on success, negative error code on failure.
3039  */
3040 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3041 {
3042         int r;
3043
3044         if (amdgpu_sriov_vf(adev)) {
3045                 amdgpu_virt_fini_data_exchange(adev);
3046                 amdgpu_virt_request_full_gpu(adev, false);
3047         }
3048
3049         r = amdgpu_device_ip_suspend_phase1(adev);
3050         if (r)
3051                 return r;
3052         r = amdgpu_device_ip_suspend_phase2(adev);
3053
3054         if (amdgpu_sriov_vf(adev))
3055                 amdgpu_virt_release_full_gpu(adev, false);
3056
3057         return r;
3058 }
3059
3060 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3061 {
3062         int i, r;
3063
3064         static enum amd_ip_block_type ip_order[] = {
3065                 AMD_IP_BLOCK_TYPE_COMMON,
3066                 AMD_IP_BLOCK_TYPE_GMC,
3067                 AMD_IP_BLOCK_TYPE_PSP,
3068                 AMD_IP_BLOCK_TYPE_IH,
3069         };
3070
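        /*
         * After an SR-IOV reset, bring the base blocks back up first in the
         * fixed order above; the remaining blocks are re-initialized later
         * by amdgpu_device_ip_reinit_late_sriov().
         */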
3071         for (i = 0; i < adev->num_ip_blocks; i++) {
3072                 int j;
3073                 struct amdgpu_ip_block *block;
3074
3075                 block = &adev->ip_blocks[i];
3076                 block->status.hw = false;
3077
3078                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3079
3080                         if (block->version->type != ip_order[j] ||
3081                                 !block->status.valid)
3082                                 continue;
3083
3084                         r = block->version->funcs->hw_init(adev);
3085                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3086                         if (r)
3087                                 return r;
3088                         block->status.hw = true;
3089                 }
3090         }
3091
3092         return 0;
3093 }
3094
3095 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3096 {
3097         int i, r;
3098
3099         static enum amd_ip_block_type ip_order[] = {
3100                 AMD_IP_BLOCK_TYPE_SMC,
3101                 AMD_IP_BLOCK_TYPE_DCE,
3102                 AMD_IP_BLOCK_TYPE_GFX,
3103                 AMD_IP_BLOCK_TYPE_SDMA,
3104                 AMD_IP_BLOCK_TYPE_UVD,
3105                 AMD_IP_BLOCK_TYPE_VCE,
3106                 AMD_IP_BLOCK_TYPE_VCN
3107         };
3108
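        /*
         * Re-initialize the remaining blocks in the fixed order above.  The
         * SMC block is brought back with its resume() callback rather than
         * hw_init(); all other blocks go through hw_init().
         */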
3109         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3110                 int j;
3111                 struct amdgpu_ip_block *block;
3112
3113                 for (j = 0; j < adev->num_ip_blocks; j++) {
3114                         block = &adev->ip_blocks[j];
3115
3116                         if (block->version->type != ip_order[i] ||
3117                                 !block->status.valid ||
3118                                 block->status.hw)
3119                                 continue;
3120
3121                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3122                                 r = block->version->funcs->resume(adev);
3123                         else
3124                                 r = block->version->funcs->hw_init(adev);
3125
3126                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3127                         if (r)
3128                                 return r;
3129                         block->status.hw = true;
3130                 }
3131         }
3132
3133         return 0;
3134 }
3135
3136 /**
3137  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3138  *
3139  * @adev: amdgpu_device pointer
3140  *
3141  * First resume function for hardware IPs.  The list of all the hardware
3142  * IPs that make up the asic is walked and the resume callbacks are run for
3143  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3144  * after a suspend and updates the software state as necessary.  This
3145  * function is also used for restoring the GPU after a GPU reset.
3146  * Returns 0 on success, negative error code on failure.
3147  */
3148 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3149 {
3150         int i, r;
3151
3152         for (i = 0; i < adev->num_ip_blocks; i++) {
3153                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3154                         continue;
3155                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3156                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3157                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3158
3159                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3160                         if (r) {
3161                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3162                                           adev->ip_blocks[i].version->funcs->name, r);
3163                                 return r;
3164                         }
3165                         adev->ip_blocks[i].status.hw = true;
3166                 }
3167         }
3168
3169         return 0;
3170 }
3171
3172 /**
3173  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3174  *
3175  * @adev: amdgpu_device pointer
3176  *
3177  * Second resume function for hardware IPs.  The list of all the hardware
3178  * IPs that make up the asic is walked and the resume callbacks are run for
3179  * all blocks except COMMON, GMC, IH, and PSP.  resume puts the hardware into a
3180  * functional state after a suspend and updates the software state as
3181  * necessary.  This function is also used for restoring the GPU after a GPU
3182  * reset.
3183  * Returns 0 on success, negative error code on failure.
3184  */
3185 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3186 {
3187         int i, r;
3188
3189         for (i = 0; i < adev->num_ip_blocks; i++) {
3190                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3191                         continue;
3192                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3193                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3194                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3195                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3196                         continue;
3197                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3198                 if (r) {
3199                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3200                                   adev->ip_blocks[i].version->funcs->name, r);
3201                         return r;
3202                 }
3203                 adev->ip_blocks[i].status.hw = true;
3204         }
3205
3206         return 0;
3207 }
3208
3209 /**
3210  * amdgpu_device_ip_resume - run resume for hardware IPs
3211  *
3212  * @adev: amdgpu_device pointer
3213  *
3214  * Main resume function for hardware IPs.  The hardware IPs
3215  * are split into two resume functions because they are
3216  * also used in recovering from a GPU reset and some additional
3217  * steps need to be taken between them.  In this case (S3/S4) they are
3218  * run sequentially.
3219  * Returns 0 on success, negative error code on failure.
3220  */
3221 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3222 {
3223         int r;
3224
3225         r = amdgpu_amdkfd_resume_iommu(adev);
3226         if (r)
3227                 return r;
3228
3229         r = amdgpu_device_ip_resume_phase1(adev);
3230         if (r)
3231                 return r;
3232
3233         r = amdgpu_device_fw_loading(adev);
3234         if (r)
3235                 return r;
3236
3237         r = amdgpu_device_ip_resume_phase2(adev);
3238
3239         return r;
3240 }
3241
3242 /**
3243  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3244  *
3245  * @adev: amdgpu_device pointer
3246  *
3247  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3248  */
3249 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3250 {
3251         if (amdgpu_sriov_vf(adev)) {
3252                 if (adev->is_atom_fw) {
3253                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3254                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3255                 } else {
3256                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3257                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3258                 }
3259
3260                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3261                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3262         }
3263 }
3264
3265 /**
3266  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3267  *
3268  * @asic_type: AMD asic type
3269  *
3270  * Check if there is DC (new modesetting infrastructure) support for an asic.
3271  * returns true if DC has support, false if not.
3272  */
3273 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3274 {
3275         switch (asic_type) {
3276 #ifdef CONFIG_DRM_AMDGPU_SI
3277         case CHIP_HAINAN:
3278 #endif
3279         case CHIP_TOPAZ:
3280                 /* chips with no display hardware */
3281                 return false;
3282 #if defined(CONFIG_DRM_AMD_DC)
3283         case CHIP_TAHITI:
3284         case CHIP_PITCAIRN:
3285         case CHIP_VERDE:
3286         case CHIP_OLAND:
3287                 /*
3288                  * We have systems in the wild with these ASICs that require
3289                  * LVDS and VGA support which is not supported with DC.
3290                  *
3291                  * Fall back to the non-DC driver here by default so as not to
3292                  * cause regressions.
3293                  */
3294 #if defined(CONFIG_DRM_AMD_DC_SI)
3295                 return amdgpu_dc > 0;
3296 #else
3297                 return false;
3298 #endif
3299         case CHIP_BONAIRE:
3300         case CHIP_KAVERI:
3301         case CHIP_KABINI:
3302         case CHIP_MULLINS:
3303                 /*
3304                  * We have systems in the wild with these ASICs that require
3305                  * VGA support which is not supported with DC.
3306                  *
3307                  * Fall back to the non-DC driver here by default so as not to
3308                  * cause regressions.
3309                  */
3310                 return amdgpu_dc > 0;
3311         default:
3312                 return amdgpu_dc != 0;
3313 #else
3314         default:
3315                 if (amdgpu_dc > 0)
3316                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3317                                          "but isn't supported by ASIC, ignoring\n");
3318                 return false;
3319 #endif
3320         }
3321 }
3322
3323 /**
3324  * amdgpu_device_has_dc_support - check if dc is supported
3325  *
3326  * @adev: amdgpu_device pointer
3327  *
3328  * Returns true for supported, false for not supported
3329  */
3330 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3331 {
3332         if (amdgpu_sriov_vf(adev) ||
3333             adev->enable_virtual_display ||
3334             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3335                 return false;
3336
3337         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3338 }
3339
3340 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3341 {
3342         struct amdgpu_device *adev =
3343                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3344         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3345
3346         /* It's a bug to not have a hive within this function */
3347         if (WARN_ON(!hive))
3348                 return;
3349
3350         /*
3351          * Use task barrier to synchronize all xgmi reset works across the
3352          * hive. task_barrier_enter and task_barrier_exit will block
3353          * until all the threads running the xgmi reset works reach
3354          * those points. task_barrier_full will do both blocks.
3355          */
3356         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3357
3358                 task_barrier_enter(&hive->tb);
3359                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3360
3361                 if (adev->asic_reset_res)
3362                         goto fail;
3363
3364                 task_barrier_exit(&hive->tb);
3365                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3366
3367                 if (adev->asic_reset_res)
3368                         goto fail;
3369
3370                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3371                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3372                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3373         } else {
3374
3375                 task_barrier_full(&hive->tb);
3376                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3377         }
3378
3379 fail:
3380         if (adev->asic_reset_res)
3381                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3382                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3383         amdgpu_put_xgmi_hive(hive);
3384 }
3385
3386 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3387 {
3388         char *input = amdgpu_lockup_timeout;
3389         char *timeout_setting = NULL;
3390         int index = 0;
3391         long timeout;
3392         int ret = 0;
3393
3394         /*
3395          * By default the timeout for non-compute jobs is 10000 ms
3396          * and 60000 ms for compute jobs.
3397          * For an SR-IOV VF the compute timeout defaults to 60000 ms
3398          * only in pp_one_vf mode, otherwise it is 10000 ms.
3399          */
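        /*
         * For example, amdgpu.lockup_timeout=10000,60000,10000,10000 sets
         * the gfx, compute, sdma and video timeouts (in ms) in that order;
         * a single value applies to all non-compute jobs, 0 keeps the
         * default and a negative value disables the timeout.
         */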
3400         adev->gfx_timeout = msecs_to_jiffies(10000);
3401         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3402         if (amdgpu_sriov_vf(adev))
3403                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3404                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3405         else
3406                 adev->compute_timeout = msecs_to_jiffies(60000);
3407
3408         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3409                 while ((timeout_setting = strsep(&input, ",")) &&
3410                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3411                         ret = kstrtol(timeout_setting, 0, &timeout);
3412                         if (ret)
3413                                 return ret;
3414
3415                         if (timeout == 0) {
3416                                 index++;
3417                                 continue;
3418                         } else if (timeout < 0) {
3419                                 timeout = MAX_SCHEDULE_TIMEOUT;
3420                                 dev_warn(adev->dev, "lockup timeout disabled");
3421                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3422                         } else {
3423                                 timeout = msecs_to_jiffies(timeout);
3424                         }
3425
3426                         switch (index++) {
3427                         case 0:
3428                                 adev->gfx_timeout = timeout;
3429                                 break;
3430                         case 1:
3431                                 adev->compute_timeout = timeout;
3432                                 break;
3433                         case 2:
3434                                 adev->sdma_timeout = timeout;
3435                                 break;
3436                         case 3:
3437                                 adev->video_timeout = timeout;
3438                                 break;
3439                         default:
3440                                 break;
3441                         }
3442                 }
3443                 /*
3444                  * There is only one value specified and
3445                  * it should apply to all non-compute jobs.
3446                  */
3447                 if (index == 1) {
3448                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3449                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3450                                 adev->compute_timeout = adev->gfx_timeout;
3451                 }
3452         }
3453
3454         return ret;
3455 }
3456
3457 /**
3458  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3459  *
3460  * @adev: amdgpu_device pointer
3461  *
3462  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3463  */
3464 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3465 {
3466         struct iommu_domain *domain;
3467
3468         domain = iommu_get_domain_for_dev(adev->dev);
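        /*
         * No IOMMU domain or an identity (passthrough) domain means DMA
         * addresses equal physical addresses, i.e. system RAM is direct
         * mapped to the GPU.
         */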
3469         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3470                 adev->ram_is_direct_mapped = true;
3471 }
3472
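/* sysfs attributes created on the PCI device for each amdgpu instance */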
3473 static const struct attribute *amdgpu_dev_attributes[] = {
3474         &dev_attr_product_name.attr,
3475         &dev_attr_product_number.attr,
3476         &dev_attr_serial_number.attr,
3477         &dev_attr_pcie_replay_count.attr,
3478         NULL
3479 };
3480
3481 /**
3482  * amdgpu_device_init - initialize the driver
3483  *
3484  * @adev: amdgpu_device pointer
3485  * @flags: driver flags
3486  *
3487  * Initializes the driver info and hw (all asics).
3488  * Returns 0 for success or an error on failure.
3489  * Called at driver startup.
3490  */
3491 int amdgpu_device_init(struct amdgpu_device *adev,
3492                        uint32_t flags)
3493 {
3494         struct drm_device *ddev = adev_to_drm(adev);
3495         struct pci_dev *pdev = adev->pdev;
3496         int r, i;
3497         bool px = false;
3498         u32 max_MBps;
3499
3500         adev->shutdown = false;
3501         adev->flags = flags;
3502
3503         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3504                 adev->asic_type = amdgpu_force_asic_type;
3505         else
3506                 adev->asic_type = flags & AMD_ASIC_MASK;
3507
3508         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3509         if (amdgpu_emu_mode == 1)
3510                 adev->usec_timeout *= 10;
3511         adev->gmc.gart_size = 512 * 1024 * 1024;
3512         adev->accel_working = false;
3513         adev->num_rings = 0;
3514         adev->mman.buffer_funcs = NULL;
3515         adev->mman.buffer_funcs_ring = NULL;
3516         adev->vm_manager.vm_pte_funcs = NULL;
3517         adev->vm_manager.vm_pte_num_scheds = 0;
3518         adev->gmc.gmc_funcs = NULL;
3519         adev->harvest_ip_mask = 0x0;
3520         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3521         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3522
3523         adev->smc_rreg = &amdgpu_invalid_rreg;
3524         adev->smc_wreg = &amdgpu_invalid_wreg;
3525         adev->pcie_rreg = &amdgpu_invalid_rreg;
3526         adev->pcie_wreg = &amdgpu_invalid_wreg;
3527         adev->pciep_rreg = &amdgpu_invalid_rreg;
3528         adev->pciep_wreg = &amdgpu_invalid_wreg;
3529         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3530         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3531         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3532         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3533         adev->didt_rreg = &amdgpu_invalid_rreg;
3534         adev->didt_wreg = &amdgpu_invalid_wreg;
3535         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3536         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3537         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3538         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3539
3540         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3541                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3542                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3543
3544         /* mutex initialization is all done here so we
3545          * can recall functions without having locking issues */
3546         mutex_init(&adev->firmware.mutex);
3547         mutex_init(&adev->pm.mutex);
3548         mutex_init(&adev->gfx.gpu_clock_mutex);
3549         mutex_init(&adev->srbm_mutex);
3550         mutex_init(&adev->gfx.pipe_reserve_mutex);
3551         mutex_init(&adev->gfx.gfx_off_mutex);
3552         mutex_init(&adev->grbm_idx_mutex);
3553         mutex_init(&adev->mn_lock);
3554         mutex_init(&adev->virt.vf_errors.lock);
3555         hash_init(adev->mn_hash);
3556         mutex_init(&adev->psp.mutex);
3557         mutex_init(&adev->notifier_lock);
3558         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3559         mutex_init(&adev->benchmark_mutex);
3560
3561         amdgpu_device_init_apu_flags(adev);
3562
3563         r = amdgpu_device_check_arguments(adev);
3564         if (r)
3565                 return r;
3566
3567         spin_lock_init(&adev->mmio_idx_lock);
3568         spin_lock_init(&adev->smc_idx_lock);
3569         spin_lock_init(&adev->pcie_idx_lock);
3570         spin_lock_init(&adev->uvd_ctx_idx_lock);
3571         spin_lock_init(&adev->didt_idx_lock);
3572         spin_lock_init(&adev->gc_cac_idx_lock);
3573         spin_lock_init(&adev->se_cac_idx_lock);
3574         spin_lock_init(&adev->audio_endpt_idx_lock);
3575         spin_lock_init(&adev->mm_stats.lock);
3576
3577         INIT_LIST_HEAD(&adev->shadow_list);
3578         mutex_init(&adev->shadow_list_lock);
3579
3580         INIT_LIST_HEAD(&adev->reset_list);
3581
3582         INIT_LIST_HEAD(&adev->ras_list);
3583
3584         INIT_DELAYED_WORK(&adev->delayed_init_work,
3585                           amdgpu_device_delayed_init_work_handler);
3586         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3587                           amdgpu_device_delay_enable_gfx_off);
3588
3589         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3590
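        /*
         * GFXOFF is only allowed once the request count drops to zero;
         * start with one outstanding request so it stays disabled until
         * the driver explicitly enables it.
         */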
3591         adev->gfx.gfx_off_req_count = 1;
3592         adev->gfx.gfx_off_residency = 0;
3593         adev->gfx.gfx_off_entrycount = 0;
3594         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3595
3596         atomic_set(&adev->throttling_logging_enabled, 1);
3597         /*
3598          * If throttling continues, logging will be performed every minute
3599          * to avoid log flooding. "-1" is subtracted since the thermal
3600          * throttling interrupt comes every second. Thus, the total logging
3601          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3602          * for the throttling interrupt) = 60 seconds.
3603          */
3604         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3605         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3606
3607         /* Registers mapping */
3608         /* TODO: block userspace mapping of io register */
3609         if (adev->asic_type >= CHIP_BONAIRE) {
3610                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3611                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3612         } else {
3613                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3614                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3615         }
3616
3617         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3618                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3619
3620         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3621         if (adev->rmmio == NULL) {
3622                 return -ENOMEM;
3623         }
3624         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3625         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3626
3627         amdgpu_device_get_pcie_info(adev);
3628
3629         if (amdgpu_mcbp)
3630                 DRM_INFO("MCBP is enabled\n");
3631
3632         /*
3633          * The reset domain needs to be present early, before the XGMI hive is
3634          * discovered (if any) and initialized, so that the reset semaphore and
3635          * in_gpu_reset flag can be used early during init and before any RREG32 call.
3636          */
3637         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3638         if (!adev->reset_domain)
3639                 return -ENOMEM;
3640
3641         /* detect hw virtualization here */
3642         amdgpu_detect_virtualization(adev);
3643
3644         r = amdgpu_device_get_job_timeout_settings(adev);
3645         if (r) {
3646                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3647                 return r;
3648         }
3649
3650         /* early init functions */
3651         r = amdgpu_device_ip_early_init(adev);
3652         if (r)
3653                 return r;
3654
3655         /* Enable TMZ based on IP_VERSION */
3656         amdgpu_gmc_tmz_set(adev);
3657
3658         amdgpu_gmc_noretry_set(adev);
3659         /* Need to get xgmi info early to decide the reset behavior */
3660         if (adev->gmc.xgmi.supported) {
3661                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3662                 if (r)
3663                         return r;
3664         }
3665
3666         /* enable PCIE atomic ops */
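        /*
         * A VF learns the capability from the host through the PF2VF info
         * structure, while bare metal tries to enable 32/64-bit atomic
         * completers on the PCIe root port.
         */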
3667         if (amdgpu_sriov_vf(adev))
3668                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3669                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3670                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3671         else
3672                 adev->have_atomics_support =
3673                         !pci_enable_atomic_ops_to_root(adev->pdev,
3674                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3675                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3676         if (!adev->have_atomics_support)
3677                 dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3678
3679         /* doorbell bar mapping and doorbell index init */
3680         amdgpu_device_doorbell_init(adev);
3681
3682         if (amdgpu_emu_mode == 1) {
3683                 /* post the asic on emulation mode */
3684                 emu_soc_asic_init(adev);
3685                 goto fence_driver_init;
3686         }
3687
3688         amdgpu_reset_init(adev);
3689
3690         /* detect if we are with an SRIOV vbios */
3691         amdgpu_device_detect_sriov_bios(adev);
3692
3693         /* check if we need to reset the asic
3694          *  E.g., driver was not cleanly unloaded previously, etc.
3695          */
3696         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3697                 if (adev->gmc.xgmi.num_physical_nodes) {
3698                         dev_info(adev->dev, "Pending hive reset.\n");
3699                         adev->gmc.xgmi.pending_reset = true;
3700                         /* Only need to init necessary block for SMU to handle the reset */
3701                         for (i = 0; i < adev->num_ip_blocks; i++) {
3702                                 if (!adev->ip_blocks[i].status.valid)
3703                                         continue;
3704                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3705                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3706                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3707                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3708                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3709                                                 adev->ip_blocks[i].version->funcs->name);
3710                                         adev->ip_blocks[i].status.hw = true;
3711                                 }
3712                         }
3713                 } else {
3714                         r = amdgpu_asic_reset(adev);
3715                         if (r) {
3716                                 dev_err(adev->dev, "asic reset on init failed\n");
3717                                 goto failed;
3718                         }
3719                 }
3720         }
3721
3722         pci_enable_pcie_error_reporting(adev->pdev);
3723
3724         /* Post card if necessary */
3725         if (amdgpu_device_need_post(adev)) {
3726                 if (!adev->bios) {
3727                         dev_err(adev->dev, "no vBIOS found\n");
3728                         r = -EINVAL;
3729                         goto failed;
3730                 }
3731                 DRM_INFO("GPU posting now...\n");
3732                 r = amdgpu_device_asic_init(adev);
3733                 if (r) {
3734                         dev_err(adev->dev, "gpu post error!\n");
3735                         goto failed;
3736                 }
3737         }
3738
3739         if (adev->is_atom_fw) {
3740                 /* Initialize clocks */
3741                 r = amdgpu_atomfirmware_get_clock_info(adev);
3742                 if (r) {
3743                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3744                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3745                         goto failed;
3746                 }
3747         } else {
3748                 /* Initialize clocks */
3749                 r = amdgpu_atombios_get_clock_info(adev);
3750                 if (r) {
3751                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3752                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3753                         goto failed;
3754                 }
3755                 /* init i2c buses */
3756                 if (!amdgpu_device_has_dc_support(adev))
3757                         amdgpu_atombios_i2c_init(adev);
3758         }
3759
3760 fence_driver_init:
3761         /* Fence driver */
3762         r = amdgpu_fence_driver_sw_init(adev);
3763         if (r) {
3764                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3765                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3766                 goto failed;
3767         }
3768
3769         /* init the mode config */
3770         drm_mode_config_init(adev_to_drm(adev));
3771
3772         r = amdgpu_device_ip_init(adev);
3773         if (r) {
3774                 /* failed in exclusive mode due to timeout */
3775                 if (amdgpu_sriov_vf(adev) &&
3776                     !amdgpu_sriov_runtime(adev) &&
3777                     amdgpu_virt_mmio_blocked(adev) &&
3778                     !amdgpu_virt_wait_reset(adev)) {
3779                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3780                         /* Don't send request since VF is inactive. */
3781                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3782                         adev->virt.ops = NULL;
3783                         r = -EAGAIN;
3784                         goto release_ras_con;
3785                 }
3786                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3787                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3788                 goto release_ras_con;
3789         }
3790
3791         amdgpu_fence_driver_hw_init(adev);
3792
3793         dev_info(adev->dev,
3794                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3795                         adev->gfx.config.max_shader_engines,
3796                         adev->gfx.config.max_sh_per_se,
3797                         adev->gfx.config.max_cu_per_sh,
3798                         adev->gfx.cu_info.number);
3799
3800         adev->accel_working = true;
3801
3802         amdgpu_vm_check_compute_bug(adev);
3803
3804         /* Initialize the buffer migration limit. */
3805         if (amdgpu_moverate >= 0)
3806                 max_MBps = amdgpu_moverate;
3807         else
3808                 max_MBps = 8; /* Allow 8 MB/s. */
3809         /* Get a log2 for easy divisions. */
3810         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3811
3812         r = amdgpu_pm_sysfs_init(adev);
3813         if (r) {
3814                 adev->pm_sysfs_en = false;
3815                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3816         } else
3817                 adev->pm_sysfs_en = true;
3818
3819         r = amdgpu_ucode_sysfs_init(adev);
3820         if (r) {
3821                 adev->ucode_sysfs_en = false;
3822                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3823         } else
3824                 adev->ucode_sysfs_en = true;
3825
3826         r = amdgpu_psp_sysfs_init(adev);
3827         if (r) {
3828                 adev->psp_sysfs_en = false;
3829                 if (!amdgpu_sriov_vf(adev))
3830                         DRM_ERROR("Creating psp sysfs failed\n");
3831         } else
3832                 adev->psp_sysfs_en = true;
3833
3834         /*
3835          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3836          * Otherwise the mgpu fan boost feature will be skipped because the
3837          * gpu instance count would be too low.
3838          */
3839         amdgpu_register_gpu_instance(adev);
3840
3841         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3842          * explicit gating rather than handling it automatically.
3843          */
3844         if (!adev->gmc.xgmi.pending_reset) {
3845                 r = amdgpu_device_ip_late_init(adev);
3846                 if (r) {
3847                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3848                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3849                         goto release_ras_con;
3850                 }
3851                 /* must succeed. */
3852                 amdgpu_ras_resume(adev);
3853                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3854                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3855         }
3856
3857         if (amdgpu_sriov_vf(adev))
3858                 flush_delayed_work(&adev->delayed_init_work);
3859
3860         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3861         if (r)
3862                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3863
3864         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3865                 r = amdgpu_pmu_init(adev);
3866                 if (r)
3867                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
3868
3869         /* Have stored pci confspace at hand for restore in sudden PCI error */
3870         if (amdgpu_device_cache_pci_state(adev->pdev))
3871                 pci_restore_state(pdev);
3872
3873         /* if we have more than one VGA card, disable the amdgpu VGA resources */
3874         /* this will fail for cards that aren't VGA class devices, just
3875          * ignore it */
3876         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3877                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3878
3879         if (amdgpu_device_supports_px(ddev)) {
3880                 px = true;
3881                 vga_switcheroo_register_client(adev->pdev,
3882                                                &amdgpu_switcheroo_ops, px);
3883                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3884         }
3885
3886         if (adev->gmc.xgmi.pending_reset)
3887                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3888                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3889
3890         amdgpu_device_check_iommu_direct_map(adev);
3891
3892         return 0;
3893
3894 release_ras_con:
3895         amdgpu_release_ras_context(adev);
3896
3897 failed:
3898         amdgpu_vf_error_trans_all(adev);
3899
3900         return r;
3901 }
3902
3903 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3904 {
3905
3906         /* Clear all CPU mappings pointing to this device */
3907         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3908
3909         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3910         amdgpu_device_doorbell_fini(adev);
3911
3912         iounmap(adev->rmmio);
3913         adev->rmmio = NULL;
3914         if (adev->mman.aper_base_kaddr)
3915                 iounmap(adev->mman.aper_base_kaddr);
3916         adev->mman.aper_base_kaddr = NULL;
3917
3918         /* Memory manager related */
3919         if (!adev->gmc.xgmi.connected_to_cpu) {
3920                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3921                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3922         }
3923 }
3924
3925 /**
3926  * amdgpu_device_fini_hw - tear down the driver
3927  *
3928  * @adev: amdgpu_device pointer
3929  *
3930  * Tear down the driver info (all asics).
3931  * Called at driver shutdown.
3932  */
3933 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3934 {
3935         dev_info(adev->dev, "amdgpu: finishing device.\n");
3936         flush_delayed_work(&adev->delayed_init_work);
3937         adev->shutdown = true;
3938
3939         /* make sure the IB tests have finished before entering exclusive mode
3940          * to avoid preemption on the IB tests
3941          */
3942         if (amdgpu_sriov_vf(adev)) {
3943                 amdgpu_virt_request_full_gpu(adev, false);
3944                 amdgpu_virt_fini_data_exchange(adev);
3945         }
3946
3947         /* disable all interrupts */
3948         amdgpu_irq_disable_all(adev);
3949         if (adev->mode_info.mode_config_initialized) {
3950                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3951                         drm_helper_force_disable_all(adev_to_drm(adev));
3952                 else
3953                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3954         }
3955         amdgpu_fence_driver_hw_fini(adev);
3956
3957         if (adev->mman.initialized) {
3958                 flush_delayed_work(&adev->mman.bdev.wq);
3959                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3960         }
3961
3962         if (adev->pm_sysfs_en)
3963                 amdgpu_pm_sysfs_fini(adev);
3964         if (adev->ucode_sysfs_en)
3965                 amdgpu_ucode_sysfs_fini(adev);
3966         if (adev->psp_sysfs_en)
3967                 amdgpu_psp_sysfs_fini(adev);
3968         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3969
3970         /* disable ras feature must before hw fini */
3971         amdgpu_ras_pre_fini(adev);
3972
3973         amdgpu_device_ip_fini_early(adev);
3974
3975         amdgpu_irq_fini_hw(adev);
3976
3977         if (adev->mman.initialized)
3978                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3979
3980         amdgpu_gart_dummy_page_fini(adev);
3981
3982         amdgpu_device_unmap_mmio(adev);
3983
3984 }
3985
3986 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3987 {
3988         int idx;
3989
3990         amdgpu_fence_driver_sw_fini(adev);
3991         amdgpu_device_ip_fini(adev);
3992         release_firmware(adev->firmware.gpu_info_fw);
3993         adev->firmware.gpu_info_fw = NULL;
3994         adev->accel_working = false;
3995
3996         amdgpu_reset_fini(adev);
3997
3998         /* free i2c buses */
3999         if (!amdgpu_device_has_dc_support(adev))
4000                 amdgpu_i2c_fini(adev);
4001
4002         if (amdgpu_emu_mode != 1)
4003                 amdgpu_atombios_fini(adev);
4004
4005         kfree(adev->bios);
4006         adev->bios = NULL;
4007         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4008                 vga_switcheroo_unregister_client(adev->pdev);
4009                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4010         }
4011         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4012                 vga_client_unregister(adev->pdev);
4013
4014         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4015
4016                 iounmap(adev->rmmio);
4017                 adev->rmmio = NULL;
4018                 amdgpu_device_doorbell_fini(adev);
4019                 drm_dev_exit(idx);
4020         }
4021
4022         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4023                 amdgpu_pmu_fini(adev);
4024         if (adev->mman.discovery_bin)
4025                 amdgpu_discovery_fini(adev);
4026
4027         amdgpu_reset_put_reset_domain(adev->reset_domain);
4028         adev->reset_domain = NULL;
4029
4030         kfree(adev->pci_state);
4031
4032 }
4033
4034 /**
4035  * amdgpu_device_evict_resources - evict device resources
4036  * @adev: amdgpu device object
4037  *
4038  * Evicts all TTM device resources (VRAM BOs, GART table) from the LRU list
4039  * of the VRAM memory type. Mainly used for evicting device resources
4040  * at suspend time.
4041  *
4042  */
4043 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4044 {
4045         /* No need to evict vram on APUs for suspend to ram or s2idle */
4046         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4047                 return;
4048
4049         if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4050                 DRM_WARN("evicting device resources failed\n");
4051
4052 }
4053
4054 /*
4055  * Suspend & resume.
4056  */
4057 /**
4058  * amdgpu_device_suspend - initiate device suspend
4059  *
4060  * @dev: drm dev pointer
4061  * @fbcon: notify the fbdev of suspend
4062  *
4063  * Puts the hw in the suspend state (all asics).
4064  * Returns 0 for success or an error on failure.
4065  * Called at driver suspend.
4066  */
4067 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4068 {
4069         struct amdgpu_device *adev = drm_to_adev(dev);
4070
4071         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4072                 return 0;
4073
4074         adev->in_suspend = true;
4075
4076         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4077                 DRM_WARN("smart shift update failed\n");
4078
4079         drm_kms_helper_poll_disable(dev);
4080
4081         if (fbcon)
4082                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4083
4084         cancel_delayed_work_sync(&adev->delayed_init_work);
4085
4086         amdgpu_ras_suspend(adev);
4087
4088         amdgpu_device_ip_suspend_phase1(adev);
4089
4090         if (!adev->in_s0ix)
4091                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4092
4093         amdgpu_device_evict_resources(adev);
4094
4095         amdgpu_fence_driver_hw_fini(adev);
4096
4097         amdgpu_device_ip_suspend_phase2(adev);
4098
4099         return 0;
4100 }
4101
4102 /**
4103  * amdgpu_device_resume - initiate device resume
4104  *
4105  * @dev: drm dev pointer
4106  * @fbcon: notify the fbdev of resume
4107  *
4108  * Bring the hw back to operating state (all asics).
4109  * Returns 0 for success or an error on failure.
4110  * Called at driver resume.
4111  */
4112 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4113 {
4114         struct amdgpu_device *adev = drm_to_adev(dev);
4115         int r = 0;
4116
4117         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4118                 return 0;
4119
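        /* when resuming from S0ix, notify the SMU of the D0 entry
         * before the IP blocks are brought back up
         */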
4120         if (adev->in_s0ix)
4121                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4122
4123         /* post card */
4124         if (amdgpu_device_need_post(adev)) {
4125                 r = amdgpu_device_asic_init(adev);
4126                 if (r)
4127                         dev_err(adev->dev, "amdgpu asic init failed\n");
4128         }
4129
4130         r = amdgpu_device_ip_resume(adev);
4131         if (r) {
4132                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4133                 return r;
4134         }
4135         amdgpu_fence_driver_hw_init(adev);
4136
4137         r = amdgpu_device_ip_late_init(adev);
4138         if (r)
4139                 return r;
4140
4141         queue_delayed_work(system_wq, &adev->delayed_init_work,
4142                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4143
4144         if (!adev->in_s0ix) {
4145                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4146                 if (r)
4147                         return r;
4148         }
4149
4150         /* Make sure IB tests flushed */
4151         flush_delayed_work(&adev->delayed_init_work);
4152
4153         if (fbcon)
4154                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4155
4156         drm_kms_helper_poll_enable(dev);
4157
4158         amdgpu_ras_resume(adev);
4159
4160         /*
4161          * Most of the connector probing functions try to acquire runtime pm
4162          * refs to ensure that the GPU is powered on when connector polling is
4163          * performed. Since we're calling this from a runtime PM callback,
4164          * trying to acquire rpm refs will cause us to deadlock.
4165          *
4166          * Since we're guaranteed to be holding the rpm lock, it's safe to
4167          * temporarily disable the rpm helpers so this doesn't deadlock us.
4168          */
4169 #ifdef CONFIG_PM
4170         dev->dev->power.disable_depth++;
4171 #endif
4172         if (!amdgpu_device_has_dc_support(adev))
4173                 drm_helper_hpd_irq_event(dev);
4174         else
4175                 drm_kms_helper_hotplug_event(dev);
4176 #ifdef CONFIG_PM
4177         dev->dev->power.disable_depth--;
4178 #endif
4179         adev->in_suspend = false;
4180
4181         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4182                 DRM_WARN("smart shift update failed\n");
4183
4184         return 0;
4185 }
4186
4187 /**
4188  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4189  *
4190  * @adev: amdgpu_device pointer
4191  *
4192  * The list of all the hardware IPs that make up the asic is walked and
4193  * the check_soft_reset callbacks are run.  check_soft_reset determines
4194  * if the asic is still hung or not.
4195  * Returns true if any of the IPs are still in a hung state, false if not.
4196  */
4197 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4198 {
4199         int i;
4200         bool asic_hang = false;
4201
4202         if (amdgpu_sriov_vf(adev))
4203                 return true;
4204
4205         if (amdgpu_asic_need_full_reset(adev))
4206                 return true;
4207
4208         for (i = 0; i < adev->num_ip_blocks; i++) {
4209                 if (!adev->ip_blocks[i].status.valid)
4210                         continue;
4211                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4212                         adev->ip_blocks[i].status.hang =
4213                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4214                 if (adev->ip_blocks[i].status.hang) {
4215                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4216                         asic_hang = true;
4217                 }
4218         }
4219         return asic_hang;
4220 }
4221
4222 /**
4223  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4224  *
4225  * @adev: amdgpu_device pointer
4226  *
4227  * The list of all the hardware IPs that make up the asic is walked and the
4228  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4229  * handles any IP specific hardware or software state changes that are
4230  * necessary for a soft reset to succeed.
4231  * Returns 0 on success, negative error code on failure.
4232  */
4233 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4234 {
4235         int i, r = 0;
4236
4237         for (i = 0; i < adev->num_ip_blocks; i++) {
4238                 if (!adev->ip_blocks[i].status.valid)
4239                         continue;
4240                 if (adev->ip_blocks[i].status.hang &&
4241                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4242                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4243                         if (r)
4244                                 return r;
4245                 }
4246         }
4247
4248         return 0;
4249 }
4250
4251 /**
4252  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4253  *
4254  * @adev: amdgpu_device pointer
4255  *
4256  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4257  * reset is necessary to recover.
4258  * Returns true if a full asic reset is required, false if not.
4259  */
4260 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4261 {
4262         int i;
4263
4264         if (amdgpu_asic_need_full_reset(adev))
4265                 return true;
4266
4267         for (i = 0; i < adev->num_ip_blocks; i++) {
4268                 if (!adev->ip_blocks[i].status.valid)
4269                         continue;
4270                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4271                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4272                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4273                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4274                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4275                         if (adev->ip_blocks[i].status.hang) {
4276                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
4277                                 return true;
4278                         }
4279                 }
4280         }
4281         return false;
4282 }
4283
4284 /**
4285  * amdgpu_device_ip_soft_reset - do a soft reset
4286  *
4287  * @adev: amdgpu_device pointer
4288  *
4289  * The list of all the hardware IPs that make up the asic is walked and the
4290  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4291  * IP specific hardware or software state changes that are necessary to soft
4292  * reset the IP.
4293  * Returns 0 on success, negative error code on failure.
4294  */
4295 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4296 {
4297         int i, r = 0;
4298
4299         for (i = 0; i < adev->num_ip_blocks; i++) {
4300                 if (!adev->ip_blocks[i].status.valid)
4301                         continue;
4302                 if (adev->ip_blocks[i].status.hang &&
4303                     adev->ip_blocks[i].version->funcs->soft_reset) {
4304                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4305                         if (r)
4306                                 return r;
4307                 }
4308         }
4309
4310         return 0;
4311 }
4312
4313 /**
4314  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4315  *
4316  * @adev: amdgpu_device pointer
4317  *
4318  * The list of all the hardware IPs that make up the asic is walked and the
4319  * post_soft_reset callbacks are run if the block is hung.  post_soft_reset
4320  * handles any IP specific hardware or software state changes that are
4321  * necessary after the IP has been soft reset.
4322  * Returns 0 on success, negative error code on failure.
4323  */
4324 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4325 {
4326         int i, r = 0;
4327
4328         for (i = 0; i < adev->num_ip_blocks; i++) {
4329                 if (!adev->ip_blocks[i].status.valid)
4330                         continue;
4331                 if (adev->ip_blocks[i].status.hang &&
4332                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4333                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4334                 if (r)
4335                         return r;
4336         }
4337
4338         return 0;
4339 }
4340
4341 /**
4342  * amdgpu_device_recover_vram - Recover some VRAM contents
4343  *
4344  * @adev: amdgpu_device pointer
4345  *
4346  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4347  * restore things like GPUVM page tables after a GPU reset where
4348  * the contents of VRAM might be lost.
4349  *
4350  * Returns:
4351  * 0 on success, negative error code on failure.
4352  */
4353 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4354 {
4355         struct dma_fence *fence = NULL, *next = NULL;
4356         struct amdgpu_bo *shadow;
4357         struct amdgpu_bo_vm *vmbo;
4358         long r = 1, tmo;
4359
4360         if (amdgpu_sriov_runtime(adev))
4361                 tmo = msecs_to_jiffies(8000);
4362         else
4363                 tmo = msecs_to_jiffies(100);
4364
4365         dev_info(adev->dev, "recover vram bo from shadow start\n");
4366         mutex_lock(&adev->shadow_list_lock);
4367         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4368                 shadow = &vmbo->bo;
4369                 /* No need to recover an evicted BO */
4370                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4371                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4372                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4373                         continue;
4374
4375                 r = amdgpu_bo_restore_shadow(shadow, &next);
4376                 if (r)
4377                         break;
4378
4379                 if (fence) {
4380                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4381                         dma_fence_put(fence);
4382                         fence = next;
4383                         if (tmo == 0) {
4384                                 r = -ETIMEDOUT;
4385                                 break;
4386                         } else if (tmo < 0) {
4387                                 r = tmo;
4388                                 break;
4389                         }
4390                 } else {
4391                         fence = next;
4392                 }
4393         }
4394         mutex_unlock(&adev->shadow_list_lock);
4395
4396         if (fence)
4397                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4398         dma_fence_put(fence);
4399
4400         if (r < 0 || tmo <= 0) {
4401                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4402                 return -EIO;
4403         }
4404
4405         dev_info(adev->dev, "recover vram bo from shadow done\n");
4406         return 0;
4407 }
4408
4409
4410 /**
4411  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4412  *
4413  * @adev: amdgpu_device pointer
4414  * @from_hypervisor: request from hypervisor
4415  *
4416  * Do a VF FLR and reinitialize the ASIC.
4417  * Returns 0 on success, negative error code on failure.
4418  */
4419 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4420                                      bool from_hypervisor)
4421 {
4422         int r;
4423         struct amdgpu_hive_info *hive = NULL;
4424         int retry_limit = 0;
4425
4426 retry:
4427         amdgpu_amdkfd_pre_reset(adev);
4428
4429         if (from_hypervisor)
4430                 r = amdgpu_virt_request_full_gpu(adev, true);
4431         else
4432                 r = amdgpu_virt_reset_gpu(adev);
4433         if (r)
4434                 return r;
4435
4436         /* Resume IP prior to SMC */
4437         r = amdgpu_device_ip_reinit_early_sriov(adev);
4438         if (r)
4439                 goto error;
4440
4441         amdgpu_virt_init_data_exchange(adev);
4442
4443         r = amdgpu_device_fw_loading(adev);
4444         if (r)
4445                 return r;
4446
4447         /* now we are okay to resume SMC/CP/SDMA */
4448         r = amdgpu_device_ip_reinit_late_sriov(adev);
4449         if (r)
4450                 goto error;
4451
4452         hive = amdgpu_get_xgmi_hive(adev);
4453         /* Update PSP FW topology after reset */
4454         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4455                 r = amdgpu_xgmi_update_topology(hive, adev);
4456
4457         if (hive)
4458                 amdgpu_put_xgmi_hive(hive);
4459
4460         if (!r) {
4461                 amdgpu_irq_gpu_reset_resume_helper(adev);
4462                 r = amdgpu_ib_ring_tests(adev);
4463
4464                 amdgpu_amdkfd_post_reset(adev);
4465         }
4466
4467 error:
4468         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4469                 amdgpu_inc_vram_lost(adev);
4470                 r = amdgpu_device_recover_vram(adev);
4471         }
4472         amdgpu_virt_release_full_gpu(adev, true);
4473
4474         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4475                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4476                         retry_limit++;
4477                         goto retry;
4478                 } else
4479                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4480         }
4481
4482         return r;
4483 }
4484
4485 /**
4486  * amdgpu_device_has_job_running - check if there is any job in the pending list
4487  *
4488  * @adev: amdgpu_device pointer
4489  *
4490  * Check if any ring's scheduler still has a job in its pending list.
4491  */
4492 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4493 {
4494         int i;
4495         struct drm_sched_job *job;
4496
4497         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4498                 struct amdgpu_ring *ring = adev->rings[i];
4499
4500                 if (!ring || !ring->sched.thread)
4501                         continue;
4502
4503                 spin_lock(&ring->sched.job_list_lock);
4504                 job = list_first_entry_or_null(&ring->sched.pending_list,
4505                                                struct drm_sched_job, list);
4506                 spin_unlock(&ring->sched.job_list_lock);
4507                 if (job)
4508                         return true;
4509         }
4510         return false;
4511 }
4512
4513 /**
4514  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4515  *
4516  * @adev: amdgpu_device pointer
4517  *
4518  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4519  * a hung GPU.
4520  */
4521 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4522 {
4523
4524         if (amdgpu_gpu_recovery == 0)
4525                 goto disabled;
4526
4527         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4528                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4529                 return false;
4530         }
4531
4532         if (amdgpu_sriov_vf(adev))
4533                 return true;
4534
4535         if (amdgpu_gpu_recovery == -1) {
4536                 switch (adev->asic_type) {
4537 #ifdef CONFIG_DRM_AMDGPU_SI
4538                 case CHIP_VERDE:
4539                 case CHIP_TAHITI:
4540                 case CHIP_PITCAIRN:
4541                 case CHIP_OLAND:
4542                 case CHIP_HAINAN:
4543 #endif
4544 #ifdef CONFIG_DRM_AMDGPU_CIK
4545                 case CHIP_KAVERI:
4546                 case CHIP_KABINI:
4547                 case CHIP_MULLINS:
4548 #endif
4549                 case CHIP_CARRIZO:
4550                 case CHIP_STONEY:
4551                 case CHIP_CYAN_SKILLFISH:
4552                         goto disabled;
4553                 default:
4554                         break;
4555                 }
4556         }
4557
4558         return true;
4559
4560 disabled:
4561         dev_info(adev->dev, "GPU recovery disabled.\n");
4562         return false;
4563 }
4564
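/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering and caches the PCI config space, then triggers a
 * mode1 reset through the SMU if supported, otherwise through the PSP.
 * Afterwards the PCI state is restored and the memory size register is
 * polled until the ASIC comes back out of reset.
 * Returns 0 on success, negative error code on failure.
 */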
4565 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4566 {
4567         u32 i;
4568         int ret = 0;
4569
4570         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4571
4572         dev_info(adev->dev, "GPU mode1 reset\n");
4573
4574         /* disable BM */
4575         pci_clear_master(adev->pdev);
4576
4577         amdgpu_device_cache_pci_state(adev->pdev);
4578
4579         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4580                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4581                 ret = amdgpu_dpm_mode1_reset(adev);
4582         } else {
4583                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4584                 ret = psp_gpu_reset(adev);
4585         }
4586
4587         if (ret)
4588                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4589
4590         amdgpu_device_load_pci_state(adev->pdev);
4591
4592         /* wait for asic to come out of reset */
4593         for (i = 0; i < adev->usec_timeout; i++) {
4594                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4595
4596                 if (memsize != 0xffffffff)
4597                         break;
4598                 udelay(1);
4599         }
4600
4601         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4602         return ret;
4603 }
4604
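/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: reset context describing the upcoming reset
 *
 * Stops the SRIOV data exchange (on VFs), clears the job fences of every
 * ring and force-completes the remaining hardware fences, bumps the guilty
 * job's karma and then calls the reset handler's prepare_hwcontext.  If no
 * handler is implemented, on bare metal a soft reset is attempted first
 * when possible; otherwise the IP blocks are suspended and a full reset is
 * flagged in the reset context.
 * Returns 0 on success, negative error code on failure.
 */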
4605 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4606                                  struct amdgpu_reset_context *reset_context)
4607 {
4608         int i, r = 0;
4609         struct amdgpu_job *job = NULL;
4610         bool need_full_reset =
4611                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4612
4613         if (reset_context->reset_req_dev == adev)
4614                 job = reset_context->job;
4615
4616         if (amdgpu_sriov_vf(adev)) {
4617                 /* stop the data exchange thread */
4618                 amdgpu_virt_fini_data_exchange(adev);
4619         }
4620
4621         amdgpu_fence_driver_isr_toggle(adev, true);
4622
4623         /* block all schedulers and reset given job's ring */
4624         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4625                 struct amdgpu_ring *ring = adev->rings[i];
4626
4627                 if (!ring || !ring->sched.thread)
4628                         continue;
4629
4630                 /* Clear job fences from the fence driver so that force_completion
4631                  * only touches the NULL and vm flush fences left in it. */
4632                 amdgpu_fence_driver_clear_job_fences(ring);
4633
4634                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4635                 amdgpu_fence_driver_force_completion(ring);
4636         }
4637
4638         amdgpu_fence_driver_isr_toggle(adev, false);
4639
4640         if (job && job->vm)
4641                 drm_sched_increase_karma(&job->base);
4642
4643         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4644         /* If reset handler not implemented, continue; otherwise return */
4645         if (r == -ENOSYS)
4646                 r = 0;
4647         else
4648                 return r;
4649
4650         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4651         if (!amdgpu_sriov_vf(adev)) {
4652
4653                 if (!need_full_reset)
4654                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4655
4656                 if (!need_full_reset && amdgpu_gpu_recovery) {
4657                         amdgpu_device_ip_pre_soft_reset(adev);
4658                         r = amdgpu_device_ip_soft_reset(adev);
4659                         amdgpu_device_ip_post_soft_reset(adev);
4660                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4661                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4662                                 need_full_reset = true;
4663                         }
4664                 }
4665
4666                 if (need_full_reset)
4667                         r = amdgpu_device_ip_suspend(adev);
4668                 if (need_full_reset)
4669                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4670                 else
4671                         clear_bit(AMDGPU_NEED_FULL_RESET,
4672                                   &reset_context->flags);
4673         }
4674
4675         return r;
4676 }
4677
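/**
 * amdgpu_reset_reg_dumps - capture registers of interest before a reset
 *
 * @adev: amdgpu_device pointer
 *
 * Reads every register in the configured reset dump register list, stores
 * the values for the devcoredump and emits a trace event for each of them.
 * Must be called with the reset domain semaphore held.
 * Returns 0.
 */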
4678 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4679 {
4680         int i;
4681
4682         lockdep_assert_held(&adev->reset_domain->sem);
4683
4684         for (i = 0; i < adev->num_regs; i++) {
4685                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4686                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4687                                              adev->reset_dump_reg_value[i]);
4688         }
4689
4690         return 0;
4691 }
4692
4693 #ifdef CONFIG_DEV_COREDUMP
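/*
 * Devcoredump support: amdgpu_devcoredump_read() formats the captured reset
 * information (kernel and module identification, reset time, offending task,
 * VRAM-lost state and the register dump) for the dev_coredump interface, and
 * amdgpu_reset_capture_coredumpm() registers that data with the devcoredump
 * framework after a reset.
 */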
4694 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4695                 size_t count, void *data, size_t datalen)
4696 {
4697         struct drm_printer p;
4698         struct amdgpu_device *adev = data;
4699         struct drm_print_iterator iter;
4700         int i;
4701
4702         iter.data = buffer;
4703         iter.offset = 0;
4704         iter.start = offset;
4705         iter.remain = count;
4706
4707         p = drm_coredump_printer(&iter);
4708
4709         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4710         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4711         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4712         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4713         if (adev->reset_task_info.pid)
4714                 drm_printf(&p, "process_name: %s PID: %d\n",
4715                            adev->reset_task_info.process_name,
4716                            adev->reset_task_info.pid);
4717
4718         if (adev->reset_vram_lost)
4719                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4720         if (adev->num_regs) {
4721                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4722
4723                 for (i = 0; i < adev->num_regs; i++)
4724                         drm_printf(&p, "0x%08x: 0x%08x\n",
4725                                    adev->reset_dump_reg_list[i],
4726                                    adev->reset_dump_reg_value[i]);
4727         }
4728
4729         return count - iter.remain;
4730 }
4731
4732 static void amdgpu_devcoredump_free(void *data)
4733 {
4734 }
4735
4736 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4737 {
4738         struct drm_device *dev = adev_to_drm(adev);
4739
4740         ktime_get_ts64(&adev->reset_time);
4741         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4742                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4743 }
4744 #endif
4745
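/**
 * amdgpu_do_asic_reset - perform the actual ASIC reset and re-init
 *
 * @device_list_handle: list of devices (a single device or an XGMI hive) to reset
 * @reset_context: reset context describing the reset
 *
 * Dumps the reset registers and tries the ASIC specific reset handler first.
 * If none is implemented, the default flow is used: full ASIC resets (run in
 * parallel for XGMI hives), re-posting the cards, resuming the IP blocks in
 * phases, reloading firmware, checking for VRAM loss and finally running IB
 * ring tests and VRAM recovery.
 * Returns 0 on success, negative error code on failure.
 */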
4746 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4747                          struct amdgpu_reset_context *reset_context)
4748 {
4749         struct amdgpu_device *tmp_adev = NULL;
4750         bool need_full_reset, skip_hw_reset, vram_lost = false;
4751         int r = 0;
4752         bool gpu_reset_for_dev_remove = 0;
4753
4754         /* Try reset handler method first */
4755         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4756                                     reset_list);
4757         amdgpu_reset_reg_dumps(tmp_adev);
4758
4759         reset_context->reset_device_list = device_list_handle;
4760         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4761         /* If reset handler not implemented, continue; otherwise return */
4762         if (r == -ENOSYS)
4763                 r = 0;
4764         else
4765                 return r;
4766
4767         /* Reset handler not implemented, use the default method */
4768         need_full_reset =
4769                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4770         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4771
4772         gpu_reset_for_dev_remove =
4773                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4774                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4775
4776         /*
4777          * ASIC reset has to be done on all XGMI hive nodes ASAP
4778          * to allow proper links negotiation in FW (within 1 sec)
4779          */
4780         if (!skip_hw_reset && need_full_reset) {
4781                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4782                         /* For XGMI run all resets in parallel to speed up the process */
4783                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4784                                 tmp_adev->gmc.xgmi.pending_reset = false;
4785                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4786                                         r = -EALREADY;
4787                         } else
4788                                 r = amdgpu_asic_reset(tmp_adev);
4789
4790                         if (r) {
4791                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4792                                          r, adev_to_drm(tmp_adev)->unique);
4793                                 break;
4794                         }
4795                 }
4796
4797                 /* For XGMI wait for all resets to complete before proceed */
4798                 if (!r) {
4799                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4800                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4801                                         flush_work(&tmp_adev->xgmi_reset_work);
4802                                         r = tmp_adev->asic_reset_res;
4803                                         if (r)
4804                                                 break;
4805                                 }
4806                         }
4807                 }
4808         }
4809
4810         if (!r && amdgpu_ras_intr_triggered()) {
4811                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4812                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4813                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4814                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4815                 }
4816
4817                 amdgpu_ras_intr_cleared();
4818         }
4819
4820         /* Since the mode1 reset affects base ip blocks, the
4821          * phase1 ip blocks need to be resumed. Otherwise there
4822          * will be a BIOS signature error and the psp bootloader
4823          * can't load kdb on the next amdgpu install.
4824          */
4825         if (gpu_reset_for_dev_remove) {
4826                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4827                         amdgpu_device_ip_resume_phase1(tmp_adev);
4828
4829                 goto end;
4830         }
4831
4832         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4833                 if (need_full_reset) {
4834                         /* post card */
4835                         r = amdgpu_device_asic_init(tmp_adev);
4836                         if (r) {
4837                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4838                         } else {
4839                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4840                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4841                                 if (r)
4842                                         goto out;
4843
4844                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4845                                 if (r)
4846                                         goto out;
4847
4848                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4849 #ifdef CONFIG_DEV_COREDUMP
4850                                 tmp_adev->reset_vram_lost = vram_lost;
4851                                 memset(&tmp_adev->reset_task_info, 0,
4852                                                 sizeof(tmp_adev->reset_task_info));
4853                                 if (reset_context->job && reset_context->job->vm)
4854                                         tmp_adev->reset_task_info =
4855                                                 reset_context->job->vm->task_info;
4856                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4857 #endif
4858                                 if (vram_lost) {
4859                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4860                                         amdgpu_inc_vram_lost(tmp_adev);
4861                                 }
4862
4863                                 r = amdgpu_device_fw_loading(tmp_adev);
4864                                 if (r)
4865                                         return r;
4866
4867                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4868                                 if (r)
4869                                         goto out;
4870
4871                                 if (vram_lost)
4872                                         amdgpu_device_fill_reset_magic(tmp_adev);
4873
4874                                 /*
4875                                  * Add this ASIC back as tracked since the reset
4876                                  * already completed successfully.
4877                                  */
4878                                 amdgpu_register_gpu_instance(tmp_adev);
4879
4880                                 if (!reset_context->hive &&
4881                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4882                                         amdgpu_xgmi_add_device(tmp_adev);
4883
4884                                 r = amdgpu_device_ip_late_init(tmp_adev);
4885                                 if (r)
4886                                         goto out;
4887
4888                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4889
4890                                 /*
4891                                  * The GPU enters a bad state once the number of
4892                                  * faulty pages reported by ECC reaches the threshold,
4893                                  * and RAS recovery is scheduled next. So add a check
4894                                  * here to break recovery if the bad page threshold
4895                                  * has indeed been exceeded, and remind the user to
4896                                  * either retire this GPU or set a bigger
4897                                  * bad_page_threshold value when probing the driver
4898                                  * again.
4899                                  */
4900                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4901                                         /* must succeed. */
4902                                         amdgpu_ras_resume(tmp_adev);
4903                                 } else {
4904                                         r = -EINVAL;
4905                                         goto out;
4906                                 }
4907
4908                                 /* Update PSP FW topology after reset */
4909                                 if (reset_context->hive &&
4910                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4911                                         r = amdgpu_xgmi_update_topology(
4912                                                 reset_context->hive, tmp_adev);
4913                         }
4914                 }
4915
4916 out:
4917                 if (!r) {
4918                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4919                         r = amdgpu_ib_ring_tests(tmp_adev);
4920                         if (r) {
4921                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4922                                 need_full_reset = true;
4923                                 r = -EAGAIN;
4924                                 goto end;
4925                         }
4926                 }
4927
4928                 if (!r)
4929                         r = amdgpu_device_recover_vram(tmp_adev);
4930                 else
4931                         tmp_adev->asic_reset_res = r;
4932         }
4933
4934 end:
4935         if (need_full_reset)
4936                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4937         else
4938                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4939         return r;
4940 }
4941
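/**
 * amdgpu_device_set_mp1_state - set the MP1 state expected by the reset method
 *
 * @adev: amdgpu_device pointer
 *
 * Tells the SMU (MP1) which state to expect around the upcoming reset:
 * shutdown for mode1, reset for mode2, and none otherwise.
 */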
4942 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
4943 {
4944
4945         switch (amdgpu_asic_reset_method(adev)) {
4946         case AMD_RESET_METHOD_MODE1:
4947                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4948                 break;
4949         case AMD_RESET_METHOD_MODE2:
4950                 adev->mp1_state = PP_MP1_STATE_RESET;
4951                 break;
4952         default:
4953                 adev->mp1_state = PP_MP1_STATE_NONE;
4954                 break;
4955         }
4956 }
4957
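/**
 * amdgpu_device_unset_mp1_state - restore the MP1 state after a reset
 *
 * @adev: amdgpu_device pointer
 *
 * Flushes any pending VF error messages and puts MP1 back into the
 * default (none) state once the reset has finished.
 */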
4958 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
4959 {
4960         amdgpu_vf_error_trans_all(adev);
4961         adev->mp1_state = PP_MP1_STATE_NONE;
4962 }
4963
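/**
 * amdgpu_device_resume_display_audio - resume the display audio function
 *
 * @adev: amdgpu_device pointer
 *
 * Re-enables runtime PM on the audio function (devfn 1 on the GPU's bus)
 * and resumes it, undoing amdgpu_device_suspend_display_audio().
 */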
4964 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4965 {
4966         struct pci_dev *p = NULL;
4967
4968         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4969                         adev->pdev->bus->number, 1);
4970         if (p) {
4971                 pm_runtime_enable(&(p->dev));
4972                 pm_runtime_resume(&(p->dev));
4973         }
4974 }
4975
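/**
 * amdgpu_device_suspend_display_audio - suspend the display audio function
 *
 * @adev: amdgpu_device pointer
 *
 * For reset methods known to disturb the audio codec (BACO and mode1),
 * force the audio function (devfn 1 on the GPU's bus) into runtime suspend
 * and disable runtime PM on it so that it stays suspended across the reset.
 * Returns 0 on success, negative error code on failure.
 */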
4976 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4977 {
4978         enum amd_reset_method reset_method;
4979         struct pci_dev *p = NULL;
4980         u64 expires;
4981
4982         /*
4983          * For now, only BACO and mode1 reset are confirmed
4984          * to suffer from the audio issue if not properly suspended.
4985          */
4986         reset_method = amdgpu_asic_reset_method(adev);
4987         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4988              (reset_method != AMD_RESET_METHOD_MODE1))
4989                 return -EINVAL;
4990
4991         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4992                         adev->pdev->bus->number, 1);
4993         if (!p)
4994                 return -ENODEV;
4995
4996         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4997         if (!expires)
4998                 /*
4999                  * If we cannot get the audio device autosuspend delay,
5000                  * a fixed 4S interval will be used. Since 3S is the
5001                  * audio controller's default autosuspend delay setting,
5002                  * the 4S used here is guaranteed to cover it.
5003                  */
5004                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5005
5006         while (!pm_runtime_status_suspended(&(p->dev))) {
5007                 if (!pm_runtime_suspend(&(p->dev)))
5008                         break;
5009
5010                 if (expires < ktime_get_mono_fast_ns()) {
5011                         dev_warn(adev->dev, "failed to suspend display audio\n");
5012                         /* TODO: abort the succeeding gpu reset? */
5013                         return -ETIMEDOUT;
5014                 }
5015         }
5016
5017         pm_runtime_disable(&(p->dev));
5018
5019         return 0;
5020 }
5021
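/**
 * amdgpu_device_recheck_guilty_jobs - find the job that really hung the GPU
 *
 * @adev: amdgpu_device pointer
 * @device_list_handle: list of devices taking part in the reset
 * @reset_context: reset context describing the reset
 *
 * Resubmits the first pending job of every ring one at a time and waits for
 * its hardware fence.  A job that times out again is marked guilty, its
 * parent fence is dropped and another hardware reset is performed; jobs that
 * do signal are completed and removed from the pending list.
 */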
5022 static void amdgpu_device_recheck_guilty_jobs(
5023         struct amdgpu_device *adev, struct list_head *device_list_handle,
5024         struct amdgpu_reset_context *reset_context)
5025 {
5026         int i, r = 0;
5027
5028         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5029                 struct amdgpu_ring *ring = adev->rings[i];
5030                 int ret = 0;
5031                 struct drm_sched_job *s_job;
5032
5033                 if (!ring || !ring->sched.thread)
5034                         continue;
5035
5036                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5037                                 struct drm_sched_job, list);
5038                 if (s_job == NULL)
5039                         continue;
5040
5041                 /* clear the job's guilty flag and rely on the following step to decide the real one */
5042                 drm_sched_reset_karma(s_job);
5043                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5044
5045                 if (!s_job->s_fence->parent) {
5046                         DRM_WARN("Failed to get a HW fence for job!");
5047                         continue;
5048                 }
5049
5050                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5051                 if (ret == 0) { /* timeout */
5052                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5053                                                 ring->sched.name, s_job->id);
5054
5055
5056                         amdgpu_fence_driver_isr_toggle(adev, true);
5057
5058                         /* Clear this failed job from fence array */
5059                         amdgpu_fence_driver_clear_job_fences(ring);
5060
5061                         amdgpu_fence_driver_isr_toggle(adev, false);
5062
5063                         /* Since the job won't signal and we go for
5064                          * another resubmit drop this parent pointer
5065                          */
5066                         dma_fence_put(s_job->s_fence->parent);
5067                         s_job->s_fence->parent = NULL;
5068
5069                         /* set guilty */
5070                         drm_sched_increase_karma(s_job);
5071                         amdgpu_reset_prepare_hwcontext(adev, reset_context);
5072 retry:
5073                         /* do hw reset */
5074                         if (amdgpu_sriov_vf(adev)) {
5075                                 amdgpu_virt_fini_data_exchange(adev);
5076                                 r = amdgpu_device_reset_sriov(adev, false);
5077                                 if (r)
5078                                         adev->asic_reset_res = r;
5079                         } else {
5080                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5081                                           &reset_context->flags);
5082                                 r = amdgpu_do_asic_reset(device_list_handle,
5083                                                          reset_context);
5084                                 if (r && r == -EAGAIN)
5085                                         goto retry;
5086                         }
5087
5088                         /*
5089                          * add reset counter so that the following
5090                          * resubmitted job could flush vmid
5091                          */
5092                         atomic_inc(&adev->gpu_reset_counter);
5093                         continue;
5094                 }
5095
5096                 /* got the hw fence, signal finished fence */
5097                 atomic_dec(ring->sched.score);
5098                 dma_fence_get(&s_job->s_fence->finished);
5099                 dma_fence_signal(&s_job->s_fence->finished);
5100                 dma_fence_put(&s_job->s_fence->finished);
5101
5102                 /* remove node from list and free the job */
5103                 spin_lock(&ring->sched.job_list_lock);
5104                 list_del_init(&s_job->list);
5105                 spin_unlock(&ring->sched.job_list_lock);
5106                 ring->sched.ops->free_job(s_job);
5107         }
5108 }
5109
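/**
 * amdgpu_device_stop_pending_resets - cancel reset work queued elsewhere
 *
 * @adev: amdgpu_device pointer
 *
 * Cancels any reset work that may still be pending from debugfs, KFD, the
 * SRIOV FLR handler or RAS recovery, since the reset currently in progress
 * already covers them.
 */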
5110 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5111 {
5112         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5113
5114 #if defined(CONFIG_DEBUG_FS)
5115         if (!amdgpu_sriov_vf(adev))
5116                 cancel_work(&adev->reset_work);
5117 #endif
5118
5119         if (adev->kfd.dev)
5120                 cancel_work(&adev->kfd.reset_work);
5121
5122         if (amdgpu_sriov_vf(adev))
5123                 cancel_work(&adev->virt.flr_work);
5124
5125         if (con && adev->ras_enabled)
5126                 cancel_work(&con->recovery_work);
5127
5128 }
5129
5130
5131 /**
5132  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5133  *
5134  * @adev: amdgpu_device pointer
5135  * @job: which job triggered the hang
5136  *
5137  * Attempt to reset the GPU if it has hung (all asics).
5138  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5139  * Returns 0 for success or an error on failure.
5140  */
5141
5142 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5143                               struct amdgpu_job *job,
5144                               struct amdgpu_reset_context *reset_context)
5145 {
5146         struct list_head device_list, *device_list_handle =  NULL;
5147         bool job_signaled = false;
5148         struct amdgpu_hive_info *hive = NULL;
5149         struct amdgpu_device *tmp_adev = NULL;
5150         int i, r = 0;
5151         bool need_emergency_restart = false;
5152         bool audio_suspended = false;
5153         int tmp_vram_lost_counter;
5154         bool gpu_reset_for_dev_remove = false;
5155
5156         gpu_reset_for_dev_remove =
5157                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5158                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5159
5160         /*
5161          * Special case: RAS triggered and full reset isn't supported
5162          */
5163         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5164
5165         /*
5166          * Flush RAM to disk so that after reboot
5167          * the user can read log and see why the system rebooted.
5168          */
5169         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5170                 DRM_WARN("Emergency reboot.");
5171
5172                 ksys_sync_helper();
5173                 emergency_restart();
5174         }
5175
5176         dev_info(adev->dev, "GPU %s begin!\n",
5177                 need_emergency_restart ? "jobs stop":"reset");
5178
5179         if (!amdgpu_sriov_vf(adev))
5180                 hive = amdgpu_get_xgmi_hive(adev);
5181         if (hive)
5182                 mutex_lock(&hive->hive_lock);
5183
5184         reset_context->job = job;
5185         reset_context->hive = hive;
5186
5187         /*
5188          * Build list of devices to reset.
5189          * In case we are in XGMI hive mode, re-sort the device list
5190          * to put adev in the first position.
5191          */
5192         INIT_LIST_HEAD(&device_list);
5193         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5194                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5195                         list_add_tail(&tmp_adev->reset_list, &device_list);
5196                 if (!list_is_first(&adev->reset_list, &device_list))
5197                         list_rotate_to_front(&adev->reset_list, &device_list);
5198                 device_list_handle = &device_list;
5199         } else {
5200                 list_add_tail(&adev->reset_list, &device_list);
5201                 device_list_handle = &device_list;
5202         }
5203
5204         /* We need to lock reset domain only once both for XGMI and single device */
5205         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5206                                     reset_list);
5207         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5208
5209         /* block all schedulers and reset given job's ring */
5210         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5211
5212                 amdgpu_device_set_mp1_state(tmp_adev);
5213
5214                 /*
5215                  * Try to put the audio codec into suspend state
5216                  * before the gpu reset is started.
5217                  *
5218                  * The power domain of the graphics device is shared
5219                  * with the AZ power domain. Without this step we may
5220                  * change the audio hardware behind the audio
5221                  * driver's back, which will trigger some audio
5222                  * codec errors.
5223                  */
5224                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5225                         audio_suspended = true;
5226
5227                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5228
5229                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5230
5231                 if (!amdgpu_sriov_vf(tmp_adev))
5232                         amdgpu_amdkfd_pre_reset(tmp_adev);
5233
5234                 /*
5235                  * Mark these ASICs to be reset as untracked first,
5236                  * and add them back after the reset has completed.
5237                  */
5238                 amdgpu_unregister_gpu_instance(tmp_adev);
5239
5240                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5241
5242                 /* disable ras on ALL IPs */
5243                 if (!need_emergency_restart &&
5244                       amdgpu_device_ip_need_full_reset(tmp_adev))
5245                         amdgpu_ras_suspend(tmp_adev);
5246
5247                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5248                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5249
5250                         if (!ring || !ring->sched.thread)
5251                                 continue;
5252
5253                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5254
5255                         if (need_emergency_restart)
5256                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5257                 }
5258                 atomic_inc(&tmp_adev->gpu_reset_counter);
5259         }
5260
5261         if (need_emergency_restart)
5262                 goto skip_sched_resume;
5263
5264         /*
5265          * Must check guilty signal here since after this point all old
5266          * HW fences are force signaled.
5267          *
5268          * job->base holds a reference to parent fence
5269          */
5270         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5271                 job_signaled = true;
5272                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5273                 goto skip_hw_reset;
5274         }
5275
5276 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5277         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5278                 if (gpu_reset_for_dev_remove) {
5279                         /* Workaround for ASICs that need to disable SMC first */
5280                         amdgpu_device_smu_fini_early(tmp_adev);
5281                 }
5282                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5283                 /* TODO: should we stop here? */
5284                 if (r) {
5285                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5286                                   r, adev_to_drm(tmp_adev)->unique);
5287                         tmp_adev->asic_reset_res = r;
5288                 }
5289
5290                 /*
5291                  * Drop all pending non-scheduler resets. Scheduler resets
5292                  * were already dropped during drm_sched_stop.
5293                  */
5294                 amdgpu_device_stop_pending_resets(tmp_adev);
5295         }
5296
5297         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5298         /* Actual ASIC resets if needed.*/
5299         /* Host driver will handle XGMI hive reset for SRIOV */
5300         if (amdgpu_sriov_vf(adev)) {
5301                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5302                 if (r)
5303                         adev->asic_reset_res = r;
5304
5305                 /* Aldebaran supports ras in SRIOV, so we need to resume ras during reset */
5306                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5307                         amdgpu_ras_resume(adev);
5308         } else {
5309                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5310                 if (r && r == -EAGAIN) {
5311                         set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
5312                         adev->asic_reset_res = 0;
5313                         goto retry;
5314                 }
5315
5316                 if (!r && gpu_reset_for_dev_remove)
5317                         goto recover_end;
5318         }
5319
5320 skip_hw_reset:
5321
5322         /* Post ASIC reset for all devs .*/
5323         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5324
5325                 /*
5326                  * Sometimes a later bad compute job can block a good gfx job because
5327                  * the gfx and compute rings share internal GC HW. We add an additional
5328                  * guilty-job recheck step to find the real guilty job: it synchronously
5329                  * resubmits jobs and waits for the first one to be signaled. If that job
5330                  * times out, we identify it as the real guilty job.
5331                  */
5332                 if (amdgpu_gpu_recovery == 2 &&
5333                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5334                         amdgpu_device_recheck_guilty_jobs(
5335                                 tmp_adev, device_list_handle, reset_context);
5336
5337                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5338                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5339
5340                         if (!ring || !ring->sched.thread)
5341                                 continue;
5342
5343                         /* No point in resubmitting jobs if we didn't HW reset */
5344                         if (!tmp_adev->asic_reset_res && !job_signaled)
5345                                 drm_sched_resubmit_jobs(&ring->sched);
5346
5347                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5348                 }
5349
5350                 if (adev->enable_mes)
5351                         amdgpu_mes_self_test(tmp_adev);
5352
5353                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5354                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5355                 }
5356
5357                 if (tmp_adev->asic_reset_res)
5358                         r = tmp_adev->asic_reset_res;
5359
5360                 tmp_adev->asic_reset_res = 0;
5361
5362                 if (r) {
5363                         /* bad news, how do we tell this to userspace? */
5364                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5365                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5366                 } else {
5367                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5368                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5369                                 DRM_WARN("smart shift update failed\n");
5370                 }
5371         }
5372
5373 skip_sched_resume:
5374         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5375                 /* unlock kfd: SRIOV would do it separately */
5376                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5377                         amdgpu_amdkfd_post_reset(tmp_adev);
5378
5379                 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5380                  * we need to bring up kfd here if it was not initialized before.
5381                  */
5382                 if (!adev->kfd.init_complete)
5383                         amdgpu_amdkfd_device_init(adev);
5384
5385                 if (audio_suspended)
5386                         amdgpu_device_resume_display_audio(tmp_adev);
5387
5388                 amdgpu_device_unset_mp1_state(tmp_adev);
5389         }
5390
5391 recover_end:
5392         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5393                                             reset_list);
5394         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5395
5396         if (hive) {
5397                 mutex_unlock(&hive->hive_lock);
5398                 amdgpu_put_xgmi_hive(hive);
5399         }
5400
5401         if (r)
5402                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5403
5404         atomic_set(&adev->reset_domain->reset_res, r);
5405         return r;
5406 }
5407
5408 /**
5409  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5410  *
5411  * @adev: amdgpu_device pointer
5412  *
5413  * Fetches and stores in the driver the PCIE capabilities (gen speed
5414  * and lanes) of the slot the device is in. Handles APUs and
5415  * virtualized environments where PCIE config space may not be available.
5416  */
5417 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5418 {
5419         struct pci_dev *pdev;
5420         enum pci_bus_speed speed_cap, platform_speed_cap;
5421         enum pcie_link_width platform_link_width;
5422
5423         if (amdgpu_pcie_gen_cap)
5424                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5425
5426         if (amdgpu_pcie_lane_cap)
5427                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5428
5429         /* covers APUs as well */
5430         if (pci_is_root_bus(adev->pdev->bus)) {
5431                 if (adev->pm.pcie_gen_mask == 0)
5432                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5433                 if (adev->pm.pcie_mlw_mask == 0)
5434                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5435                 return;
5436         }
5437
5438         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5439                 return;
5440
5441         pcie_bandwidth_available(adev->pdev, NULL,
5442                                  &platform_speed_cap, &platform_link_width);
5443
5444         if (adev->pm.pcie_gen_mask == 0) {
5445                 /* asic caps */
5446                 pdev = adev->pdev;
5447                 speed_cap = pcie_get_speed_cap(pdev);
5448                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5449                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5450                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5451                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5452                 } else {
5453                         if (speed_cap == PCIE_SPEED_32_0GT)
5454                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5455                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5456                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5457                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5458                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5459                         else if (speed_cap == PCIE_SPEED_16_0GT)
5460                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5461                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5462                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5463                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5464                         else if (speed_cap == PCIE_SPEED_8_0GT)
5465                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5466                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5467                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5468                         else if (speed_cap == PCIE_SPEED_5_0GT)
5469                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5470                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5471                         else
5472                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5473                 }
5474                 /* platform caps */
5475                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5476                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5477                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5478                 } else {
5479                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5480                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5481                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5482                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5483                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5484                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5485                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5486                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5487                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5488                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5489                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5490                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5491                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5492                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5493                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5494                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5495                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5496                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5497                         else
5498                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5499
5500                 }
5501         }
5502         if (adev->pm.pcie_mlw_mask == 0) {
5503                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5504                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5505                 } else {
5506                         switch (platform_link_width) {
5507                         case PCIE_LNK_X32:
5508                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5509                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5510                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5511                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5512                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5513                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5514                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5515                                 break;
5516                         case PCIE_LNK_X16:
5517                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5518                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5519                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5520                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5521                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5522                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5523                                 break;
5524                         case PCIE_LNK_X12:
5525                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5526                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5527                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5528                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5529                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5530                                 break;
5531                         case PCIE_LNK_X8:
5532                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5533                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5534                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5535                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5536                                 break;
5537                         case PCIE_LNK_X4:
5538                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5539                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5540                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5541                                 break;
5542                         case PCIE_LNK_X2:
5543                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5544                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5545                                 break;
5546                         case PCIE_LNK_X1:
5547                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5548                                 break;
5549                         default:
5550                                 break;
5551                         }
5552                 }
5553         }
5554 }
5555
5556 /**
5557  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5558  *
5559  * @adev: amdgpu_device pointer
5560  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5561  *
5562  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5563  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5564  * @peer_adev.
5565  */
5566 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5567                                       struct amdgpu_device *peer_adev)
5568 {
5569 #ifdef CONFIG_HSA_AMD_P2P
5570         uint64_t address_mask = peer_adev->dev->dma_mask ?
5571                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5572         resource_size_t aper_limit =
5573                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5574         bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
5575                           !(pci_p2pdma_distance_many(adev->pdev,
5576                                         &peer_adev->dev, 1, true) < 0);
5577
5578         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5579                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5580                 !(adev->gmc.aper_base & address_mask ||
5581                   aper_limit & address_mask));
5582 #else
5583         return false;
5584 #endif
5585 }
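/*
 * Editorial sketch (not part of the original file): the address check in
 * amdgpu_device_is_peer_accessible() reduces to "does the whole visible
 * aperture lie below the peer's DMA reachability limit?". The user-space
 * toy below replays just that arithmetic with made-up numbers; the real
 * function additionally requires a large BAR (visible == real VRAM) and a
 * usable pci_p2pdma distance.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_mask     = (1ULL << 44) - 1;   /* peer can address 44 bits */
	uint64_t address_mask = ~dma_mask;          /* bits the peer cannot reach */
	uint64_t aper_base    = 0x38000000000ULL;   /* hypothetical BAR base */
	uint64_t aper_size    = 32ULL << 30;        /* 32 GiB aperture */
	uint64_t aper_limit   = aper_base + aper_size - 1;

	int ok = !((aper_base & address_mask) || (aper_limit & address_mask));
	printf("aperture reachable by peer DMA: %s\n", ok ? "yes" : "no");
	return 0;
}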
5586
5587 int amdgpu_device_baco_enter(struct drm_device *dev)
5588 {
5589         struct amdgpu_device *adev = drm_to_adev(dev);
5590         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5591
5592         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5593                 return -ENOTSUPP;
5594
5595         if (ras && adev->ras_enabled &&
5596             adev->nbio.funcs->enable_doorbell_interrupt)
5597                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5598
5599         return amdgpu_dpm_baco_enter(adev);
5600 }
5601
5602 int amdgpu_device_baco_exit(struct drm_device *dev)
5603 {
5604         struct amdgpu_device *adev = drm_to_adev(dev);
5605         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5606         int ret = 0;
5607
5608         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5609                 return -ENOTSUPP;
5610
5611         ret = amdgpu_dpm_baco_exit(adev);
5612         if (ret)
5613                 return ret;
5614
5615         if (ras && adev->ras_enabled &&
5616             adev->nbio.funcs->enable_doorbell_interrupt)
5617                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5618
5619         if (amdgpu_passthrough(adev) &&
5620             adev->nbio.funcs->clear_doorbell_interrupt)
5621                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5622
5623         return 0;
5624 }
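/*
 * Editorial sketch (not part of the original file): BACO entry and exit
 * are used as a bracketed pair, for example when a BACO-based reset is
 * performed. A minimal, hypothetical caller assuming only the two
 * helpers above:
 */
static int amdgpu_example_baco_reset(struct drm_device *ddev)
{
	int r = amdgpu_device_baco_enter(ddev);

	if (r)
		return r;
	/* the chip is now powered off behind a still-active bus */
	return amdgpu_device_baco_exit(ddev);
}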
5625
5626 /**
5627  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5628  * @pdev: PCI device struct
5629  * @state: PCI channel state
5630  *
5631  * Description: Called when a PCI error is detected.
5632  *
5633  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5634  */
5635 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5636 {
5637         struct drm_device *dev = pci_get_drvdata(pdev);
5638         struct amdgpu_device *adev = drm_to_adev(dev);
5639         int i;
5640
5641         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5642
5643         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5644                 DRM_WARN("No support for XGMI hive yet...");
5645                 return PCI_ERS_RESULT_DISCONNECT;
5646         }
5647
5648         adev->pci_channel_state = state;
5649
5650         switch (state) {
5651         case pci_channel_io_normal:
5652                 return PCI_ERS_RESULT_CAN_RECOVER;
5653         /* Fatal error, prepare for slot reset */
5654         case pci_channel_io_frozen:
5655                 /*
5656                  * Locking adev->reset_domain->sem will prevent any external access
5657                  * to GPU during PCI error recovery
5658                  */
5659                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5660                 amdgpu_device_set_mp1_state(adev);
5661
5662                 /*
5663                  * Block any work scheduling as we do for regular GPU reset
5664                  * for the duration of the recovery
5665                  */
5666                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5667                         struct amdgpu_ring *ring = adev->rings[i];
5668
5669                         if (!ring || !ring->sched.thread)
5670                                 continue;
5671
5672                         drm_sched_stop(&ring->sched, NULL);
5673                 }
5674                 atomic_inc(&adev->gpu_reset_counter);
5675                 return PCI_ERS_RESULT_NEED_RESET;
5676         case pci_channel_io_perm_failure:
5677                 /* Permanent error, prepare for device removal */
5678                 return PCI_ERS_RESULT_DISCONNECT;
5679         }
5680
5681         return PCI_ERS_RESULT_NEED_RESET;
5682 }
5683
5684 /**
5685  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5686  * @pdev: pointer to PCI device
5687  */
5688 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5689 {
5690
5691         DRM_INFO("PCI error: mmio enabled callback!!\n");
5692
5693         /* TODO - dump whatever for debugging purposes */
5694
5695         /* This is called only if amdgpu_pci_error_detected returns
5696          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5697          * works, no need to reset slot.
5698          */
5699
5700         return PCI_ERS_RESULT_RECOVERED;
5701 }
5702
5703 /**
5704  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5705  * @pdev: PCI device struct
5706  *
5707  * Description: This routine is called by the pci error recovery
5708  * code after the PCI slot has been reset, just before we
5709  * should resume normal operations.
5710  */
5711 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5712 {
5713         struct drm_device *dev = pci_get_drvdata(pdev);
5714         struct amdgpu_device *adev = drm_to_adev(dev);
5715         int r, i;
5716         struct amdgpu_reset_context reset_context;
5717         u32 memsize;
5718         struct list_head device_list;
5719
5720         DRM_INFO("PCI error: slot reset callback!!\n");
5721
5722         memset(&reset_context, 0, sizeof(reset_context));
5723
5724         INIT_LIST_HEAD(&device_list);
5725         list_add_tail(&adev->reset_list, &device_list);
5726
5727         /* wait for asic to come out of reset */
5728         msleep(500);
5729
5730         /* Restore PCI confspace */
5731         amdgpu_device_load_pci_state(pdev);
5732
5733         /* confirm ASIC came out of reset */
5734         for (i = 0; i < adev->usec_timeout; i++) {
5735                 memsize = amdgpu_asic_get_config_memsize(adev);
5736
5737                 if (memsize != 0xffffffff)
5738                         break;
5739                 udelay(1);
5740         }
5741         if (memsize == 0xffffffff) {
5742                 r = -ETIME;
5743                 goto out;
5744         }
5745
5746         reset_context.method = AMD_RESET_METHOD_NONE;
5747         reset_context.reset_req_dev = adev;
5748         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5749         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5750         set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
5751
5752         adev->no_hw_access = true;
5753         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5754         adev->no_hw_access = false;
5755         if (r)
5756                 goto out;
5757
5758         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5759
5760 out:
5761         if (!r) {
5762                 if (amdgpu_device_cache_pci_state(adev->pdev))
5763                         pci_restore_state(adev->pdev);
5764
5765                 DRM_INFO("PCIe error recovery succeeded\n");
5766         } else {
5767                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5768                 amdgpu_device_unset_mp1_state(adev);
5769                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5770         }
5771
5772         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5773 }
5774
5775 /**
5776  * amdgpu_pci_resume() - resume normal ops after PCI reset
5777  * @pdev: pointer to PCI device
5778  *
5779  * Called when the error recovery driver tells us that it's
5780  * OK to resume normal operation.
5781  */
5782 void amdgpu_pci_resume(struct pci_dev *pdev)
5783 {
5784         struct drm_device *dev = pci_get_drvdata(pdev);
5785         struct amdgpu_device *adev = drm_to_adev(dev);
5786         int i;
5787
5788
5789         DRM_INFO("PCI error: resume callback!!\n");
5790
5791         /* Only continue execution for the case of pci_channel_io_frozen */
5792         if (adev->pci_channel_state != pci_channel_io_frozen)
5793                 return;
5794
5795         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5796                 struct amdgpu_ring *ring = adev->rings[i];
5797
5798                 if (!ring || !ring->sched.thread)
5799                         continue;
5800
5801
5802                 drm_sched_resubmit_jobs(&ring->sched);
5803                 drm_sched_start(&ring->sched, true);
5804         }
5805
5806         amdgpu_device_unset_mp1_state(adev);
5807         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5808 }
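/*
 * Editorial sketch (not part of the original file): the four callbacks
 * above plug into the PCI AER recovery flow through a struct
 * pci_error_handlers attached to the driver's .err_handler (in amdgpu
 * that registration lives in amdgpu_drv.c). The struct name below is a
 * made-up example; the field names are the real ones from
 * include/linux/pci.h.
 */
static const struct pci_error_handlers amdgpu_pci_err_handler_example = {
	.error_detected	= amdgpu_pci_error_detected,	/* first notification */
	.mmio_enabled	= amdgpu_pci_mmio_enabled,	/* MMIO usable again */
	.slot_reset	= amdgpu_pci_slot_reset,	/* slot has been reset */
	.resume		= amdgpu_pci_resume,		/* recovery complete */
};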
5809
5810 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5811 {
5812         struct drm_device *dev = pci_get_drvdata(pdev);
5813         struct amdgpu_device *adev = drm_to_adev(dev);
5814         int r;
5815
5816         r = pci_save_state(pdev);
5817         if (!r) {
5818                 kfree(adev->pci_state);
5819
5820                 adev->pci_state = pci_store_saved_state(pdev);
5821
5822                 if (!adev->pci_state) {
5823                         DRM_ERROR("Failed to store PCI saved state");
5824                         return false;
5825                 }
5826         } else {
5827                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5828                 return false;
5829         }
5830
5831         return true;
5832 }
5833
5834 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5835 {
5836         struct drm_device *dev = pci_get_drvdata(pdev);
5837         struct amdgpu_device *adev = drm_to_adev(dev);
5838         int r;
5839
5840         if (!adev->pci_state)
5841                 return false;
5842
5843         r = pci_load_saved_state(pdev, adev->pci_state);
5844
5845         if (!r) {
5846                 pci_restore_state(pdev);
5847         } else {
5848                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5849                 return false;
5850         }
5851
5852         return true;
5853 }
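/*
 * Editorial sketch (not part of the original file): the cache/load pair
 * above is meant to bracket an operation that clobbers PCI config space,
 * such as an ASIC reset. amdgpu_example_do_reset() is a placeholder for
 * whatever actually disturbs the config space.
 */
static void amdgpu_example_reset_with_pci_state(struct amdgpu_device *adev)
{
	if (!amdgpu_device_cache_pci_state(adev->pdev))
		return;					/* nothing cached, bail out */

	amdgpu_example_do_reset(adev);			/* placeholder reset */

	amdgpu_device_load_pci_state(adev->pdev);	/* restore config space */
}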
5854
5855 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5856                 struct amdgpu_ring *ring)
5857 {
5858 #ifdef CONFIG_X86_64
5859         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5860                 return;
5861 #endif
5862         if (adev->gmc.xgmi.connected_to_cpu)
5863                 return;
5864
5865         if (ring && ring->funcs->emit_hdp_flush)
5866                 amdgpu_ring_emit_hdp_flush(ring);
5867         else
5868                 amdgpu_asic_flush_hdp(adev, ring);
5869 }
5870
5871 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5872                 struct amdgpu_ring *ring)
5873 {
5874 #ifdef CONFIG_X86_64
5875         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5876                 return;
5877 #endif
5878         if (adev->gmc.xgmi.connected_to_cpu)
5879                 return;
5880
5881         amdgpu_asic_invalidate_hdp(adev, ring);
5882 }
5883
5884 int amdgpu_in_reset(struct amdgpu_device *adev)
5885 {
5886         return atomic_read(&adev->reset_domain->in_gpu_reset);
5887 }
5888
5889 /**
5890  * amdgpu_device_halt() - bring hardware to some kind of halt state
5891  *
5892  * @adev: amdgpu_device pointer
5893  *
5894  * Bring hardware to some kind of halt state so that no one can touch it
5895  * any more. It helps to maintain error context when an error occurs.
5896  * Compared to a simple hang, the system stays stable at least for SSH
5897  * access. Then it should be trivial to inspect the hardware state and
5898  * see what's going on. Implemented as follows:
5899  *
5900  * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc),
5901  *    clears all CPU mappings to device, disallows remappings through page faults
5902  * 2. amdgpu_irq_disable_all() disables all interrupts
5903  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5904  * 4. set adev->no_hw_access to avoid potential crashes after step 5
5905  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5906  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5907  *    flush any in flight DMA operations
5908  */
5909 void amdgpu_device_halt(struct amdgpu_device *adev)
5910 {
5911         struct pci_dev *pdev = adev->pdev;
5912         struct drm_device *ddev = adev_to_drm(adev);
5913
5914         drm_dev_unplug(ddev);
5915
5916         amdgpu_irq_disable_all(adev);
5917
5918         amdgpu_fence_driver_hw_fini(adev);
5919
5920         adev->no_hw_access = true;
5921
5922         amdgpu_device_unmap_mmio(adev);
5923
5924         pci_disable_device(pdev);
5925         pci_wait_for_pending_transaction(pdev);
5926 }
5927
5928 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5929                                 u32 reg)
5930 {
5931         unsigned long flags, address, data;
5932         u32 r;
5933
5934         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5935         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5936
5937         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5938         WREG32(address, reg * 4);
5939         (void)RREG32(address);
5940         r = RREG32(data);
5941         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5942         return r;
5943 }
5944
5945 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5946                                 u32 reg, u32 v)
5947 {
5948         unsigned long flags, address, data;
5949
5950         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5951         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5952
5953         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5954         WREG32(address, reg * 4);
5955         (void)RREG32(address);
5956         WREG32(data, v);
5957         (void)RREG32(data);
5958         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5959 }
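/*
 * Editorial sketch (not part of the original file): the two helpers above
 * implement the classic index/data access pattern, serialized by
 * pcie_idx_lock. A hypothetical read-modify-write of a PCIe port register
 * would look like this; EXAMPLE_PCIE_PORT_REG and EXAMPLE_ENABLE_BIT are
 * made-up names used only for illustration.
 */
static void amdgpu_example_pcie_port_rmw(struct amdgpu_device *adev)
{
	u32 val = amdgpu_device_pcie_port_rreg(adev, EXAMPLE_PCIE_PORT_REG);

	val |= EXAMPLE_ENABLE_BIT;		/* set the bit of interest */
	amdgpu_device_pcie_port_wreg(adev, EXAMPLE_PCIE_PORT_REG, val);
}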