2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_probe_helper.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/efi.h>
43 #include "amdgpu_trace.h"
44 #include "amdgpu_i2c.h"
46 #include "amdgpu_atombios.h"
47 #include "amdgpu_atomfirmware.h"
49 #ifdef CONFIG_DRM_AMDGPU_SI
52 #ifdef CONFIG_DRM_AMDGPU_CIK
58 #include "bif/bif_4_1_d.h"
59 #include <linux/firmware.h>
60 #include "amdgpu_vf_error.h"
62 #include "amdgpu_amdkfd.h"
63 #include "amdgpu_pm.h"
65 #include "amdgpu_xgmi.h"
66 #include "amdgpu_ras.h"
67 #include "amdgpu_pmu.h"
68 #include "amdgpu_fru_eeprom.h"
69 #include "amdgpu_reset.h"
71 #include <linux/suspend.h>
72 #include <drm/task_barrier.h>
73 #include <linux/pm_runtime.h>
75 #include <drm/drm_drv.h>
77 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
90 #define AMDGPU_RESUME_MS 2000
91 #define AMDGPU_MAX_RETRY_LIMIT 2
92 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
94 const char *amdgpu_asic_name[] = {
136 * DOC: pcie_replay_count
138 * The amdgpu driver provides a sysfs API for reporting the total number
139 * of PCIe replays (NAKs).
140 * The file pcie_replay_count is used for this and returns the total
141 * number of replays as a sum of the NAKs generated and NAKs received.
144 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
145 struct device_attribute *attr, char *buf)
147 struct drm_device *ddev = dev_get_drvdata(dev);
148 struct amdgpu_device *adev = drm_to_adev(ddev);
149 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
151 return sysfs_emit(buf, "%llu\n", cnt);
154 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
155 amdgpu_device_get_pcie_replay_count, NULL);
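/*
 * Usage sketch (illustrative, not part of the driver): user space reads the
 * attribute like any other sysfs file. Assuming the device is card0:
 *
 *	int fd = open("/sys/class/drm/card0/device/pcie_replay_count", O_RDONLY);
 *	char buf[32];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("PCIe replays: %s", buf);
 *	}
 *	close(fd);
 */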
157 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
162 * The amdgpu driver provides a sysfs API for reporting the product name
164 * The file product_name is used for this and returns the product name
165 * as returned from the FRU.
166 * NOTE: This is only available for certain server cards
169 static ssize_t amdgpu_device_get_product_name(struct device *dev,
170 struct device_attribute *attr, char *buf)
172 struct drm_device *ddev = dev_get_drvdata(dev);
173 struct amdgpu_device *adev = drm_to_adev(ddev);
175 return sysfs_emit(buf, "%s\n", adev->product_name);
178 static DEVICE_ATTR(product_name, S_IRUGO,
179 amdgpu_device_get_product_name, NULL);
182 * DOC: product_number
184 * The amdgpu driver provides a sysfs API for reporting the part number
186 * The file product_number is used for this and returns the part number
187 * as returned from the FRU.
188 * NOTE: This is only available for certain server cards
191 static ssize_t amdgpu_device_get_product_number(struct device *dev,
192 struct device_attribute *attr, char *buf)
194 struct drm_device *ddev = dev_get_drvdata(dev);
195 struct amdgpu_device *adev = drm_to_adev(ddev);
197 return sysfs_emit(buf, "%s\n", adev->product_number);
200 static DEVICE_ATTR(product_number, S_IRUGO,
201 amdgpu_device_get_product_number, NULL);
206 * The amdgpu driver provides a sysfs API for reporting the serial number
208 * The file serial_number is used for this and returns the serial number
209 * as returned from the FRU.
210 * NOTE: This is only available for certain server cards
213 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
214 struct device_attribute *attr, char *buf)
216 struct drm_device *ddev = dev_get_drvdata(dev);
217 struct amdgpu_device *adev = drm_to_adev(ddev);
219 return sysfs_emit(buf, "%s\n", adev->serial);
222 static DEVICE_ATTR(serial_number, S_IRUGO,
223 amdgpu_device_get_serial_number, NULL);
226 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
228 * @dev: drm_device pointer
230 * Returns true if the device is a dGPU with ATPX power control,
231 * otherwise return false.
233 bool amdgpu_device_supports_px(struct drm_device *dev)
235 struct amdgpu_device *adev = drm_to_adev(dev);
237 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
243 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
245 * @dev: drm_device pointer
247 * Returns true if the device is a dGPU with ACPI power control,
248 * otherwise return false.
250 bool amdgpu_device_supports_boco(struct drm_device *dev)
252 struct amdgpu_device *adev = drm_to_adev(dev);
255 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
261 * amdgpu_device_supports_baco - Does the device support BACO
263 * @dev: drm_device pointer
265 * Returns true if the device supports BACO,
266 * otherwise return false.
268 bool amdgpu_device_supports_baco(struct drm_device *dev)
270 struct amdgpu_device *adev = drm_to_adev(dev);
272 return amdgpu_asic_supports_baco(adev);
276 * amdgpu_device_supports_smart_shift - Is the device dGPU with
277 * smart shift support
279 * @dev: drm_device pointer
281 * Returns true if the device is a dGPU with Smart Shift support,
282 * otherwise returns false.
284 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
286 return (amdgpu_device_supports_boco(dev) &&
287 amdgpu_acpi_is_power_shift_control_supported());
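/*
 * Sketch of how these helpers relate (hypothetical caller, based on the
 * descriptions above): runtime power management typically probes the
 * power-down methods in order of preference:
 *
 *	if (amdgpu_device_supports_px(dev))
 *		use_atpx_power_control();	// dGPU + ATPX, hypothetical helper
 *	else if (amdgpu_device_supports_boco(dev))
 *		use_acpi_power_resources();	// BOCO, hypothetical helper
 *	else if (amdgpu_device_supports_baco(dev))
 *		use_baco();			// BACO via the SMU, hypothetical helper
 */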
291 * VRAM access helper functions
295 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
297 * @adev: amdgpu_device pointer
298 * @pos: offset of the buffer in vram
299 * @buf: virtual address of the buffer in system memory
300 * @size: read/write size; the buffer at @buf must be at least @size bytes
301 * @write: true - write to vram, otherwise - read from vram
303 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
304 void *buf, size_t size, bool write)
307 uint32_t hi = ~0, tmp = 0;
308 uint32_t *data = buf;
312 if (!drm_dev_enter(adev_to_drm(adev), &idx))
315 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
317 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
318 for (last = pos + size; pos < last; pos += 4) {
321 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
323 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
327 WREG32_NO_KIQ(mmMM_DATA, *data++);
329 *data++ = RREG32_NO_KIQ(mmMM_DATA);
332 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
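/*
 * Example (sketch): reading a single dword of VRAM at offset 0x1000 through
 * the MM_INDEX/MM_DATA window; @pos and @size must be 4-byte aligned per the
 * BUG_ON above:
 *
 *	u32 val;
 *
 *	amdgpu_device_mm_access(adev, 0x1000, &val, sizeof(val), false);
 */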
337 * amdgpu_device_aper_access - access vram by vram aperture
339 * @adev: amdgpu_device pointer
340 * @pos: offset of the buffer in vram
341 * @buf: virtual address of the buffer in system memory
342 * @size: read/write size; the buffer at @buf must be at least @size bytes
343 * @write: true - write to vram, otherwise - read from vram
345 * Returns the number of bytes actually transferred.
347 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
348 void *buf, size_t size, bool write)
355 if (!adev->mman.aper_base_kaddr)
358 last = min(pos + size, adev->gmc.visible_vram_size);
360 addr = adev->mman.aper_base_kaddr + pos;
364 memcpy_toio(addr, buf, count);
366 amdgpu_device_flush_hdp(adev, NULL);
368 amdgpu_device_invalidate_hdp(adev, NULL);
370 memcpy_fromio(buf, addr, count);
382 * amdgpu_device_vram_access - read/write a buffer in vram
384 * @adev: amdgpu_device pointer
385 * @pos: offset of the buffer in vram
386 * @buf: virtual address of the buffer in system memory
387 * @size: read/write size; the buffer at @buf must be at least @size bytes
388 * @write: true - write to vram, otherwise - read from vram
390 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
391 void *buf, size_t size, bool write)
395 /* try using the VRAM aperture to access VRAM first */
396 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
399 /* use MM to access the rest of VRAM */
402 amdgpu_device_mm_access(adev, pos, buf, size, write);
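/*
 * Usage sketch (hypothetical caller): writing a page-sized buffer to VRAM;
 * the helper covers as much as possible through the CPU-visible aperture and
 * falls back to MM_INDEX/MM_DATA for the rest:
 *
 *	amdgpu_device_vram_access(adev, offset, data,
 *				  AMDGPU_GPU_PAGE_SIZE, true);
 */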
407 * register access helper functions.
410 /* Check if hw access should be skipped because of hotplug or device error */
411 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
413 if (adev->no_hw_access)
416 #ifdef CONFIG_LOCKDEP
418 * This is a bit complicated to understand, so worth a comment. What we assert
419 * here is that the GPU reset is not running on another thread in parallel.
421 * For this we trylock the read side of the reset semaphore, if that succeeds
422 * we know that the reset is not running in parallel.
424 * If the trylock fails we assert that we are either already holding the read
425 * side of the lock or are the reset thread itself and hold the write side of
429 if (down_read_trylock(&adev->reset_sem))
430 up_read(&adev->reset_sem);
432 lockdep_assert_held(&adev->reset_sem);
439 * amdgpu_device_rreg - read a memory mapped IO or indirect register
441 * @adev: amdgpu_device pointer
442 * @reg: dword aligned register offset
443 * @acc_flags: access flags which require special behavior
445 * Returns the 32 bit value from the offset specified.
447 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
448 uint32_t reg, uint32_t acc_flags)
452 if (amdgpu_device_skip_hw_access(adev))
455 if ((reg * 4) < adev->rmmio_size) {
456 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
457 amdgpu_sriov_runtime(adev) &&
458 down_read_trylock(&adev->reset_sem)) {
459 ret = amdgpu_kiq_rreg(adev, reg);
460 up_read(&adev->reset_sem);
462 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
465 ret = adev->pcie_rreg(adev, reg * 4);
468 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
474 * MMIO register read with bytes helper functions
475 * @offset: byte offset from MMIO start
480 * amdgpu_mm_rreg8 - read a memory mapped IO register
482 * @adev: amdgpu_device pointer
483 * @offset: byte aligned register offset
485 * Returns the 8 bit value from the offset specified.
487 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
489 if (amdgpu_device_skip_hw_access(adev))
492 if (offset < adev->rmmio_size)
493 return (readb(adev->rmmio + offset));
498 * MMIO register write with bytes helper functions
499 * @offset: byte offset from MMIO start
500 * @value: the value to be written to the register
504 * amdgpu_mm_wreg8 - write a memory mapped IO register
506 * @adev: amdgpu_device pointer
507 * @offset: byte aligned register offset
508 * @value: 8 bit value to write
510 * Writes the value specified to the offset specified.
512 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
514 if (amdgpu_device_skip_hw_access(adev))
517 if (offset < adev->rmmio_size)
518 writeb(value, adev->rmmio + offset);
524 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
526 * @adev: amdgpu_device pointer
527 * @reg: dword aligned register offset
528 * @v: 32 bit value to write to the register
529 * @acc_flags: access flags which require special behavior
531 * Writes the value specified to the offset specified.
533 void amdgpu_device_wreg(struct amdgpu_device *adev,
534 uint32_t reg, uint32_t v,
537 if (amdgpu_device_skip_hw_access(adev))
540 if ((reg * 4) < adev->rmmio_size) {
541 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
542 amdgpu_sriov_runtime(adev) &&
543 down_read_trylock(&adev->reset_sem)) {
544 amdgpu_kiq_wreg(adev, reg, v);
545 up_read(&adev->reset_sem);
547 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
550 adev->pcie_wreg(adev, reg * 4, v);
553 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
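/*
 * Sketch: driver code rarely calls amdgpu_device_rreg()/amdgpu_device_wreg()
 * directly; it usually goes through the RREG32()/WREG32() style macros that
 * wrap them, e.g. a read-modify-write (register name illustrative):
 *
 *	u32 tmp = RREG32(mmHDP_MISC_CNTL);
 *
 *	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
 *	WREG32(mmHDP_MISC_CNTL, tmp);
 */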
557 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
559 * @adev: amdgpu_device pointer
560 * @reg: mmio/rlc register
563 * This function is invoked only for debugfs register access.
565 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
566 uint32_t reg, uint32_t v)
568 if (amdgpu_device_skip_hw_access(adev))
571 if (amdgpu_sriov_fullaccess(adev) &&
572 adev->gfx.rlc.funcs &&
573 adev->gfx.rlc.funcs->is_rlcg_access_range) {
574 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
575 return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
576 } else if ((reg * 4) >= adev->rmmio_size) {
577 adev->pcie_wreg(adev, reg * 4, v);
579 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
584 * amdgpu_mm_rdoorbell - read a doorbell dword
586 * @adev: amdgpu_device pointer
587 * @index: doorbell index
589 * Returns the value in the doorbell aperture at the
590 * requested doorbell index (CIK).
592 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
594 if (amdgpu_device_skip_hw_access(adev))
597 if (index < adev->doorbell.num_doorbells) {
598 return readl(adev->doorbell.ptr + index);
600 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
606 * amdgpu_mm_wdoorbell - write a doorbell dword
608 * @adev: amdgpu_device pointer
609 * @index: doorbell index
612 * Writes @v to the doorbell aperture at the
613 * requested doorbell index (CIK).
615 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
617 if (amdgpu_device_skip_hw_access(adev))
620 if (index < adev->doorbell.num_doorbells) {
621 writel(v, adev->doorbell.ptr + index);
623 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
628 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
630 * @adev: amdgpu_device pointer
631 * @index: doorbell index
633 * Returns the value in the doorbell aperture at the
634 * requested doorbell index (VEGA10+).
636 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
638 if (amdgpu_device_skip_hw_access(adev))
641 if (index < adev->doorbell.num_doorbells) {
642 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
644 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
650 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
652 * @adev: amdgpu_device pointer
653 * @index: doorbell index
656 * Writes @v to the doorbell aperture at the
657 * requested doorbell index (VEGA10+).
659 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
661 if (amdgpu_device_skip_hw_access(adev))
664 if (index < adev->doorbell.num_doorbells) {
665 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
667 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
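/*
 * Usage sketch: ring code typically publishes its write pointer through the
 * doorbell helpers, e.g. via the WDOORBELL64() macro that wraps
 * amdgpu_mm_wdoorbell64() (hypothetical ring):
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL64(ring->doorbell_index, ring->wptr);
 */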
672 * amdgpu_device_indirect_rreg - read an indirect register
674 * @adev: amdgpu_device pointer
675 * @pcie_index: mmio register offset
676 * @pcie_data: mmio register offset
677 * @reg_addr: indirect register address to read from
679 * Returns the value of indirect register @reg_addr
681 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
682 u32 pcie_index, u32 pcie_data,
687 void __iomem *pcie_index_offset;
688 void __iomem *pcie_data_offset;
690 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
691 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
692 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
694 writel(reg_addr, pcie_index_offset);
695 readl(pcie_index_offset);
696 r = readl(pcie_data_offset);
697 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
703 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
705 * @adev: amdgpu_device pointer
706 * @pcie_index: mmio register offset
707 * @pcie_data: mmio register offset
708 * @reg_addr: indirect register address to read from
710 * Returns the value of indirect register @reg_addr
712 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
713 u32 pcie_index, u32 pcie_data,
718 void __iomem *pcie_index_offset;
719 void __iomem *pcie_data_offset;
721 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
722 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
723 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
725 /* read low 32 bits */
726 writel(reg_addr, pcie_index_offset);
727 readl(pcie_index_offset);
728 r = readl(pcie_data_offset);
729 /* read high 32 bits */
730 writel(reg_addr + 4, pcie_index_offset);
731 readl(pcie_index_offset);
732 r |= ((u64)readl(pcie_data_offset) << 32);
733 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
739 * amdgpu_device_indirect_wreg - write an indirect register address
741 * @adev: amdgpu_device pointer
742 * @pcie_index: mmio register offset
743 * @pcie_data: mmio register offset
744 * @reg_addr: indirect register offset
745 * @reg_data: indirect register data
748 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
749 u32 pcie_index, u32 pcie_data,
750 u32 reg_addr, u32 reg_data)
753 void __iomem *pcie_index_offset;
754 void __iomem *pcie_data_offset;
756 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
757 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
758 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
760 writel(reg_addr, pcie_index_offset);
761 readl(pcie_index_offset);
762 writel(reg_data, pcie_data_offset);
763 readl(pcie_data_offset);
764 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
768 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
770 * @adev: amdgpu_device pointer
771 * @pcie_index: mmio register offset
772 * @pcie_data: mmio register offset
773 * @reg_addr: indirect register offset
774 * @reg_data: indirect register data
777 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
778 u32 pcie_index, u32 pcie_data,
779 u32 reg_addr, u64 reg_data)
782 void __iomem *pcie_index_offset;
783 void __iomem *pcie_data_offset;
785 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
786 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
787 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
789 /* write low 32 bits */
790 writel(reg_addr, pcie_index_offset);
791 readl(pcie_index_offset);
792 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
793 readl(pcie_data_offset);
794 /* write high 32 bits */
795 writel(reg_addr + 4, pcie_index_offset);
796 readl(pcie_index_offset);
797 writel((u32)(reg_data >> 32), pcie_data_offset);
798 readl(pcie_data_offset);
799 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
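/*
 * Sketch (assumed wiring, modeled on the soc15-style ASIC files): an ASIC
 * typically wraps the indirect accessors with its NBIO index/data register
 * pair and hooks the wrapper into adev->pcie_rreg/pcie_wreg:
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		unsigned long address = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		unsigned long data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, address, data, reg);
 *	}
 */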
803 * amdgpu_invalid_rreg - dummy reg read function
805 * @adev: amdgpu_device pointer
806 * @reg: offset of register
808 * Dummy register read function. Used for register blocks
809 * that certain asics don't have (all asics).
810 * Returns the value in the register.
812 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
814 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
820 * amdgpu_invalid_wreg - dummy reg write function
822 * @adev: amdgpu_device pointer
823 * @reg: offset of register
824 * @v: value to write to the register
826 * Dummy register write function. Used for register blocks
827 * that certain asics don't have (all asics).
829 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
831 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
837 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
839 * @adev: amdgpu_device pointer
840 * @reg: offset of register
842 * Dummy register read function. Used for register blocks
843 * that certain asics don't have (all asics).
844 * Returns the value in the register.
846 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
848 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
854 * amdgpu_invalid_wreg64 - dummy reg write function
856 * @adev: amdgpu_device pointer
857 * @reg: offset of register
858 * @v: value to write to the register
860 * Dummy register write function. Used for register blocks
861 * that certain asics don't have (all asics).
863 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
865 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
871 * amdgpu_block_invalid_rreg - dummy reg read function
873 * @adev: amdgpu_device pointer
874 * @block: offset of instance
875 * @reg: offset of register
877 * Dummy register read function. Used for register blocks
878 * that certain asics don't have (all asics).
879 * Returns the value in the register.
881 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
882 uint32_t block, uint32_t reg)
884 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
891 * amdgpu_block_invalid_wreg - dummy reg write function
893 * @adev: amdgpu_device pointer
894 * @block: offset of instance
895 * @reg: offset of register
896 * @v: value to write to the register
898 * Dummy register write function. Used for register blocks
899 * that certain asics don't have (all asics).
901 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
903 uint32_t reg, uint32_t v)
905 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
911 * amdgpu_device_asic_init - Wrapper for atom asic_init
913 * @adev: amdgpu_device pointer
915 * Does any asic specific work and then calls atom asic init.
917 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
919 amdgpu_asic_pre_asic_init(adev);
921 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
925 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927 * @adev: amdgpu_device pointer
929 * Allocates a scratch page of VRAM for use by various things in the
930 * driver.
932 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
935 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
936 &adev->vram_scratch.robj,
937 &adev->vram_scratch.gpu_addr,
938 (void **)&adev->vram_scratch.ptr);
942 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944 * @adev: amdgpu_device pointer
946 * Frees the VRAM scratch page.
948 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
954 * amdgpu_device_program_register_sequence - program an array of registers.
956 * @adev: amdgpu_device pointer
957 * @registers: pointer to the register array
958 * @array_size: size of the register array
960 * Programs an array of registers with AND and OR masks.
961 * This is a helper for setting golden registers.
963 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
964 const u32 *registers,
965 const u32 array_size)
967 u32 tmp, reg, and_mask, or_mask;
973 for (i = 0; i < array_size; i += 3) {
974 reg = registers[i + 0];
975 and_mask = registers[i + 1];
976 or_mask = registers[i + 2];
978 if (and_mask == 0xffffffff) {
983 if (adev->family >= AMDGPU_FAMILY_AI)
984 tmp |= (or_mask & and_mask);
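/*
 * Usage sketch: golden register tables are flat {reg, and_mask, or_mask}
 * triples (register names and values below are illustrative only):
 *
 *	static const u32 golden_settings_example[] = {
 *		mmHDP_MISC_CNTL, 0xffffffff, 0x00000001,
 *		mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */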
993 * amdgpu_device_pci_config_reset - reset the GPU
995 * @adev: amdgpu_device pointer
997 * Resets the GPU using the pci config reset sequence.
998 * Only applicable to asics prior to vega10.
1000 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1006 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008 * @adev: amdgpu_device pointer
1010 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 return pci_reset_function(adev->pdev);
1018 * GPU doorbell aperture helpers function.
1021 * amdgpu_device_doorbell_init - Init doorbell driver information.
1023 * @adev: amdgpu_device pointer
1025 * Init doorbell driver information (CIK)
1026 * Returns 0 on success, error on failure.
1028 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1031 /* No doorbell on SI hardware generation */
1032 if (adev->asic_type < CHIP_BONAIRE) {
1033 adev->doorbell.base = 0;
1034 adev->doorbell.size = 0;
1035 adev->doorbell.num_doorbells = 0;
1036 adev->doorbell.ptr = NULL;
1040 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1043 amdgpu_asic_init_doorbell_index(adev);
1045 /* doorbell bar mapping */
1046 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1047 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1049 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1050 adev->doorbell_index.max_assignment+1);
1051 if (adev->doorbell.num_doorbells == 0)
1054 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1055 * paging queue doorbells use the second page. The
1056 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1057 * doorbells are in the first page. So with paging queue enabled,
1058 * the max num_doorbells should be extended by one page (0x400 in dwords)
1060 if (adev->asic_type >= CHIP_VEGA10)
1061 adev->doorbell.num_doorbells += 0x400;
1063 adev->doorbell.ptr = ioremap(adev->doorbell.base,
1064 adev->doorbell.num_doorbells *
1066 if (adev->doorbell.ptr == NULL)
1073 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1075 * @adev: amdgpu_device pointer
1077 * Tear down doorbell driver information (CIK)
1079 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1081 iounmap(adev->doorbell.ptr);
1082 adev->doorbell.ptr = NULL;
1088 * amdgpu_device_wb_*()
1089 * Writeback is the method by which the GPU updates special pages in memory
1090 * with the status of certain GPU events (fences, ring pointers,etc.).
1094 * amdgpu_device_wb_fini - Disable Writeback and free memory
1096 * @adev: amdgpu_device pointer
1098 * Disables Writeback and frees the Writeback memory (all asics).
1099 * Used at driver shutdown.
1101 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1103 if (adev->wb.wb_obj) {
1104 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1106 (void **)&adev->wb.wb);
1107 adev->wb.wb_obj = NULL;
1112 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1114 * @adev: amdgpu_device pointer
1116 * Initializes writeback and allocates writeback memory (all asics).
1117 * Used at driver startup.
1118 * Returns 0 on success or an -error on failure.
1120 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1124 if (adev->wb.wb_obj == NULL) {
1125 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1126 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1127 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1128 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1129 (void **)&adev->wb.wb);
1131 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1135 adev->wb.num_wb = AMDGPU_MAX_WB;
1136 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1138 /* clear wb memory */
1139 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1146 * amdgpu_device_wb_get - Allocate a wb entry
1148 * @adev: amdgpu_device pointer
1151 * Allocate a wb slot for use by the driver (all asics).
1152 * Returns 0 on success or -EINVAL on failure.
1154 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1156 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1158 if (offset < adev->wb.num_wb) {
1159 __set_bit(offset, adev->wb.used);
1160 *wb = offset << 3; /* convert to dw offset */
1168 * amdgpu_device_wb_free - Free a wb entry
1170 * @adev: amdgpu_device pointer
1173 * Free a wb slot allocated for use by the driver (all asics)
1175 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1178 if (wb < adev->wb.num_wb)
1179 __clear_bit(wb, adev->wb.used);
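/*
 * Usage sketch (hypothetical fence setup): a ring allocates a writeback
 * slot, derives the CPU and GPU addresses from the returned dword offset,
 * and frees the slot on teardown:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		// ... point the ring's fence at cpu_addr/gpu_addr ...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */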
1183 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1185 * @adev: amdgpu_device pointer
1187 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1188 * to fail, but if any of the BARs is not accessible after the size we abort
1189 * driver loading by returning -ENODEV.
1191 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1193 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1194 struct pci_bus *root;
1195 struct resource *res;
1201 if (amdgpu_sriov_vf(adev))
1204 /* skip if the bios has already enabled large BAR */
1205 if (adev->gmc.real_vram_size &&
1206 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1209 /* Check if the root BUS has 64bit memory resources */
1210 root = adev->pdev->bus;
1211 while (root->parent)
1212 root = root->parent;
1214 pci_bus_for_each_resource(root, res, i) {
1215 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1216 res->start > 0x100000000ull)
1220 /* Trying to resize is pointless without a root hub window above 4GB */
1224 /* Limit the BAR size to what is available */
1225 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1228 /* Disable memory decoding while we change the BAR addresses and size */
1229 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1230 pci_write_config_word(adev->pdev, PCI_COMMAND,
1231 cmd & ~PCI_COMMAND_MEMORY);
1233 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1234 amdgpu_device_doorbell_fini(adev);
1235 if (adev->asic_type >= CHIP_BONAIRE)
1236 pci_release_resource(adev->pdev, 2);
1238 pci_release_resource(adev->pdev, 0);
1240 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1242 DRM_INFO("Not enough PCI address space for a large BAR.");
1243 else if (r && r != -ENOTSUPP)
1244 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1246 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1248 /* When the doorbell or fb BAR isn't available we have no chance of
1249 * using the device.
1251 r = amdgpu_device_doorbell_init(adev);
1252 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1255 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1261 * GPU helpers function.
1264 * amdgpu_device_need_post - check if the hw need post or not
1266 * @adev: amdgpu_device pointer
1268 * Check if the asic has been initialized (all asics) at driver startup
1269 * or whether a post is needed after a hw reset.
1270 * Returns true if post is needed or false if not.
1272 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1276 if (amdgpu_sriov_vf(adev))
1279 if (amdgpu_passthrough(adev)) {
1280 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1281 * some old smc fw still needs the driver to do a vPost, otherwise the gpu hangs,
1282 * while smc fw versions above 22.15 don't have this flaw, so we force a
1283 * vPost for smc versions below 22.15
1285 if (adev->asic_type == CHIP_FIJI) {
1288 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1289 /* force vPost if error occurred */
1293 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1294 if (fw_ver < 0x00160e00)
1299 /* Don't post if we need to reset whole hive on init */
1300 if (adev->gmc.xgmi.pending_reset)
1303 if (adev->has_hw_reset) {
1304 adev->has_hw_reset = false;
1308 /* bios scratch used on CIK+ */
1309 if (adev->asic_type >= CHIP_BONAIRE)
1310 return amdgpu_atombios_scratch_need_asic_init(adev);
1312 /* check MEM_SIZE for older asics */
1313 reg = amdgpu_asic_get_config_memsize(adev);
1315 if ((reg != 0) && (reg != 0xffffffff))
1322 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1324 * @adev: amdgpu_device pointer
1326 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1327 * be set for this device.
1329 * Returns true if it should be used or false if not.
1331 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1333 switch (amdgpu_aspm) {
1343 return pcie_aspm_enabled(adev->pdev);
1346 /* if we get transitioned to only one device, take VGA back */
1348 * amdgpu_device_vga_set_decode - enable/disable vga decode
1350 * @pdev: PCI device pointer
1351 * @state: enable/disable vga decode
1353 * Enable/disable vga decode (all asics).
1354 * Returns VGA resource flags.
1356 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1359 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1360 amdgpu_asic_set_vga_state(adev, state);
1362 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1363 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1365 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1369 * amdgpu_device_check_block_size - validate the vm block size
1371 * @adev: amdgpu_device pointer
1373 * Validates the vm block size specified via module parameter.
1374 * The vm block size defines number of bits in page table versus page directory,
1375 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1376 * page table and the remaining bits are in the page directory.
1378 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1380 /* defines number of bits in page table versus page directory,
1381 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1382 * page table and the remaining bits are in the page directory */
1383 if (amdgpu_vm_block_size == -1)
1386 if (amdgpu_vm_block_size < 9) {
1387 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1388 amdgpu_vm_block_size);
1389 amdgpu_vm_block_size = -1;
1394 * amdgpu_device_check_vm_size - validate the vm size
1396 * @adev: amdgpu_device pointer
1398 * Validates the vm size in GB specified via module parameter.
1399 * The VM size is the size of the GPU virtual memory space in GB.
1401 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1403 /* no need to check the default value */
1404 if (amdgpu_vm_size == -1)
1407 if (amdgpu_vm_size < 1) {
1408 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1410 amdgpu_vm_size = -1;
1414 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1417 bool is_os_64 = (sizeof(void *) == 8);
1418 uint64_t total_memory;
1419 uint64_t dram_size_seven_GB = 0x1B8000000;
1420 uint64_t dram_size_three_GB = 0xB8000000;
1422 if (amdgpu_smu_memory_pool_size == 0)
1426 DRM_WARN("Not 64-bit OS, feature not supported\n");
1430 total_memory = (uint64_t)si.totalram * si.mem_unit;
1432 if ((amdgpu_smu_memory_pool_size == 1) ||
1433 (amdgpu_smu_memory_pool_size == 2)) {
1434 if (total_memory < dram_size_three_GB)
1436 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1437 (amdgpu_smu_memory_pool_size == 8)) {
1438 if (total_memory < dram_size_seven_GB)
1441 DRM_WARN("Smu memory pool size not supported\n");
1444 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1449 DRM_WARN("Not enough system memory\n");
1451 adev->pm.smu_prv_buffer_size = 0;
1454 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1456 if (!(adev->flags & AMD_IS_APU) ||
1457 adev->asic_type < CHIP_RAVEN)
1460 switch (adev->asic_type) {
1462 if (adev->pdev->device == 0x15dd)
1463 adev->apu_flags |= AMD_APU_IS_RAVEN;
1464 if (adev->pdev->device == 0x15d8)
1465 adev->apu_flags |= AMD_APU_IS_PICASSO;
1468 if ((adev->pdev->device == 0x1636) ||
1469 (adev->pdev->device == 0x164c))
1470 adev->apu_flags |= AMD_APU_IS_RENOIR;
1472 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1475 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1477 case CHIP_YELLOW_CARP:
1479 case CHIP_CYAN_SKILLFISH:
1480 if ((adev->pdev->device == 0x13FE) ||
1481 (adev->pdev->device == 0x143F))
1482 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1492 * amdgpu_device_check_arguments - validate module params
1494 * @adev: amdgpu_device pointer
1496 * Validates certain module parameters and updates
1497 * the associated values used by the driver (all asics).
1499 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1501 if (amdgpu_sched_jobs < 4) {
1502 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1504 amdgpu_sched_jobs = 4;
1505 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1506 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1508 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1511 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1512 /* gart size must be greater or equal to 32M */
1513 dev_warn(adev->dev, "gart size (%d) too small\n",
1515 amdgpu_gart_size = -1;
1518 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1519 /* gtt size must be greater or equal to 32M */
1520 dev_warn(adev->dev, "gtt size (%d) too small\n",
1522 amdgpu_gtt_size = -1;
1525 /* valid range is between 4 and 9 inclusive */
1526 if (amdgpu_vm_fragment_size != -1 &&
1527 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1528 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1529 amdgpu_vm_fragment_size = -1;
1532 if (amdgpu_sched_hw_submission < 2) {
1533 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1534 amdgpu_sched_hw_submission);
1535 amdgpu_sched_hw_submission = 2;
1536 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1537 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1538 amdgpu_sched_hw_submission);
1539 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1542 amdgpu_device_check_smu_prv_buffer_size(adev);
1544 amdgpu_device_check_vm_size(adev);
1546 amdgpu_device_check_block_size(adev);
1548 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1550 amdgpu_gmc_tmz_set(adev);
1552 amdgpu_gmc_noretry_set(adev);
1558 * amdgpu_switcheroo_set_state - set switcheroo state
1560 * @pdev: pci dev pointer
1561 * @state: vga_switcheroo state
1563 * Callback for the switcheroo driver. Suspends or resumes
1564 * the asics before or after they are powered up using ACPI methods.
1566 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1567 enum vga_switcheroo_state state)
1569 struct drm_device *dev = pci_get_drvdata(pdev);
1572 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1575 if (state == VGA_SWITCHEROO_ON) {
1576 pr_info("switched on\n");
1577 /* don't suspend or resume card normally */
1578 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1580 pci_set_power_state(pdev, PCI_D0);
1581 amdgpu_device_load_pci_state(pdev);
1582 r = pci_enable_device(pdev);
1584 DRM_WARN("pci_enable_device failed (%d)\n", r);
1585 amdgpu_device_resume(dev, true);
1587 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1589 pr_info("switched off\n");
1590 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1591 amdgpu_device_suspend(dev, true);
1592 amdgpu_device_cache_pci_state(pdev);
1593 /* Shut down the device */
1594 pci_disable_device(pdev);
1595 pci_set_power_state(pdev, PCI_D3cold);
1596 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1601 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1603 * @pdev: pci dev pointer
1605 * Callback for the switcheroo driver. Checks if the switcheroo
1606 * state can be changed.
1607 * Returns true if the state can be changed, false if not.
1609 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1611 struct drm_device *dev = pci_get_drvdata(pdev);
1614 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1615 * locking inversion with the driver load path. And the access here is
1616 * completely racy anyway. So don't bother with locking for now.
1618 return atomic_read(&dev->open_count) == 0;
1621 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1622 .set_gpu_state = amdgpu_switcheroo_set_state,
1624 .can_switch = amdgpu_switcheroo_can_switch,
1628 * amdgpu_device_ip_set_clockgating_state - set the CG state
1630 * @dev: amdgpu_device pointer
1631 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1632 * @state: clockgating state (gate or ungate)
1634 * Sets the requested clockgating state for all instances of
1635 * the hardware IP specified.
1636 * Returns the error code from the last instance.
1638 int amdgpu_device_ip_set_clockgating_state(void *dev,
1639 enum amd_ip_block_type block_type,
1640 enum amd_clockgating_state state)
1642 struct amdgpu_device *adev = dev;
1645 for (i = 0; i < adev->num_ip_blocks; i++) {
1646 if (!adev->ip_blocks[i].status.valid)
1648 if (adev->ip_blocks[i].version->type != block_type)
1650 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1652 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1653 (void *)adev, state);
1655 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1656 adev->ip_blocks[i].version->funcs->name, r);
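/*
 * Usage sketch: gating or ungating the clocks of all instances of one IP,
 * e.g. (hypothetical call site):
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_UNGATE);
 */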
1662 * amdgpu_device_ip_set_powergating_state - set the PG state
1664 * @dev: amdgpu_device pointer
1665 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1666 * @state: powergating state (gate or ungate)
1668 * Sets the requested powergating state for all instances of
1669 * the hardware IP specified.
1670 * Returns the error code from the last instance.
1672 int amdgpu_device_ip_set_powergating_state(void *dev,
1673 enum amd_ip_block_type block_type,
1674 enum amd_powergating_state state)
1676 struct amdgpu_device *adev = dev;
1679 for (i = 0; i < adev->num_ip_blocks; i++) {
1680 if (!adev->ip_blocks[i].status.valid)
1682 if (adev->ip_blocks[i].version->type != block_type)
1684 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1686 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1687 (void *)adev, state);
1689 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1690 adev->ip_blocks[i].version->funcs->name, r);
1696 * amdgpu_device_ip_get_clockgating_state - get the CG state
1698 * @adev: amdgpu_device pointer
1699 * @flags: clockgating feature flags
1701 * Walks the list of IPs on the device and updates the clockgating
1702 * flags for each IP.
1703 * Updates @flags with the feature flags for each hardware IP where
1704 * clockgating is enabled.
1706 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1711 for (i = 0; i < adev->num_ip_blocks; i++) {
1712 if (!adev->ip_blocks[i].status.valid)
1714 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1715 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1720 * amdgpu_device_ip_wait_for_idle - wait for idle
1722 * @adev: amdgpu_device pointer
1723 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1725 * Waits for the requested hardware IP to be idle.
1726 * Returns 0 for success or a negative error code on failure.
1728 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1729 enum amd_ip_block_type block_type)
1733 for (i = 0; i < adev->num_ip_blocks; i++) {
1734 if (!adev->ip_blocks[i].status.valid)
1736 if (adev->ip_blocks[i].version->type == block_type) {
1737 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1748 * amdgpu_device_ip_is_idle - is the hardware IP idle
1750 * @adev: amdgpu_device pointer
1751 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1753 * Check if the hardware IP is idle or not.
1754 * Returns true if the IP is idle, false if not.
1756 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1757 enum amd_ip_block_type block_type)
1761 for (i = 0; i < adev->num_ip_blocks; i++) {
1762 if (!adev->ip_blocks[i].status.valid)
1764 if (adev->ip_blocks[i].version->type == block_type)
1765 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1772 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1774 * @adev: amdgpu_device pointer
1775 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1777 * Returns a pointer to the hardware IP block structure
1778 * if it exists for the asic, otherwise NULL.
1780 struct amdgpu_ip_block *
1781 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1782 enum amd_ip_block_type type)
1786 for (i = 0; i < adev->num_ip_blocks; i++)
1787 if (adev->ip_blocks[i].version->type == type)
1788 return &adev->ip_blocks[i];
1794 * amdgpu_device_ip_block_version_cmp
1796 * @adev: amdgpu_device pointer
1797 * @type: enum amd_ip_block_type
1798 * @major: major version
1799 * @minor: minor version
1801 * Returns 0 if the IP version is equal or greater,
1802 * 1 if it is smaller or the ip_block doesn't exist
1804 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1805 enum amd_ip_block_type type,
1806 u32 major, u32 minor)
1808 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1810 if (ip_block && ((ip_block->version->major > major) ||
1811 ((ip_block->version->major == major) &&
1812 (ip_block->version->minor >= minor))))
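/*
 * Usage sketch: gating a feature on a minimum IP version, e.g. requiring
 * GFX 8.1 or newer (hypothetical call site):
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1))
 *		enable_feature();	// hypothetical helper
 */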
1819 * amdgpu_device_ip_block_add
1821 * @adev: amdgpu_device pointer
1822 * @ip_block_version: pointer to the IP to add
1824 * Adds the IP block driver information to the collection of IPs
1827 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1828 const struct amdgpu_ip_block_version *ip_block_version)
1830 if (!ip_block_version)
1833 switch (ip_block_version->type) {
1834 case AMD_IP_BLOCK_TYPE_VCN:
1835 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1838 case AMD_IP_BLOCK_TYPE_JPEG:
1839 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1846 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1847 ip_block_version->funcs->name);
1849 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1855 * amdgpu_device_enable_virtual_display - enable virtual display feature
1857 * @adev: amdgpu_device pointer
1859 * Enables the virtual display feature if the user has enabled it via
1860 * the module parameter virtual_display. This feature provides a virtual
1861 * display hardware on headless boards or in virtualized environments.
1862 * This function parses and validates the configuration string specified by
1863 * the user and configures the virtual display configuration (number of
1864 * virtual connectors, crtcs, etc.) specified.
1866 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1868 adev->enable_virtual_display = false;
1870 if (amdgpu_virtual_display) {
1871 const char *pci_address_name = pci_name(adev->pdev);
1872 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1874 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1875 pciaddstr_tmp = pciaddstr;
1876 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1877 pciaddname = strsep(&pciaddname_tmp, ",");
1878 if (!strcmp("all", pciaddname)
1879 || !strcmp(pci_address_name, pciaddname)) {
1883 adev->enable_virtual_display = true;
1886 res = kstrtol(pciaddname_tmp, 10,
1894 adev->mode_info.num_crtc = num_crtc;
1896 adev->mode_info.num_crtc = 1;
1902 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1903 amdgpu_virtual_display, pci_address_name,
1904 adev->enable_virtual_display, adev->mode_info.num_crtc);
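/*
 * Example (illustrative): the virtual_display module parameter takes a
 * semicolon-separated list of "pci-address,num_crtcs" entries, e.g. two
 * virtual CRTCs on one device:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 */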
1911 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1913 * @adev: amdgpu_device pointer
1915 * Parses the asic configuration parameters specified in the gpu info
1916 * firmware and makes them available to the driver for use in configuring
1918 * Returns 0 on success, -EINVAL on failure.
1920 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1922 const char *chip_name;
1925 const struct gpu_info_firmware_header_v1_0 *hdr;
1927 adev->firmware.gpu_info_fw = NULL;
1929 if (adev->mman.discovery_bin) {
1930 amdgpu_discovery_get_gfx_info(adev);
1933 * FIXME: The bounding box is still needed by Navi12, so
1934 * temporarily read it from gpu_info firmware. Should be dropped
1935 * when DAL no longer needs it.
1937 if (adev->asic_type != CHIP_NAVI12)
1941 switch (adev->asic_type) {
1942 #ifdef CONFIG_DRM_AMDGPU_SI
1949 #ifdef CONFIG_DRM_AMDGPU_CIK
1959 case CHIP_POLARIS10:
1960 case CHIP_POLARIS11:
1961 case CHIP_POLARIS12:
1966 case CHIP_ALDEBARAN:
1967 case CHIP_SIENNA_CICHLID:
1968 case CHIP_NAVY_FLOUNDER:
1969 case CHIP_DIMGREY_CAVEFISH:
1970 case CHIP_BEIGE_GOBY:
1974 chip_name = "vega10";
1977 chip_name = "vega12";
1980 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1981 chip_name = "raven2";
1982 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1983 chip_name = "picasso";
1985 chip_name = "raven";
1988 chip_name = "arcturus";
1991 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1992 chip_name = "renoir";
1994 chip_name = "green_sardine";
1997 chip_name = "navi10";
2000 chip_name = "navi14";
2003 chip_name = "navi12";
2006 chip_name = "vangogh";
2008 case CHIP_YELLOW_CARP:
2009 chip_name = "yellow_carp";
2013 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2014 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
2017 "Failed to load gpu_info firmware \"%s\"\n",
2021 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
2024 "Failed to validate gpu_info firmware \"%s\"\n",
2029 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2030 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2032 switch (hdr->version_major) {
2035 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2036 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2037 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2040 * Should be dropped when DAL no longer needs it.
2042 if (adev->asic_type == CHIP_NAVI12)
2043 goto parse_soc_bounding_box;
2045 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2046 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2047 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2048 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2049 adev->gfx.config.max_texture_channel_caches =
2050 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2051 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2052 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2053 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2054 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2055 adev->gfx.config.double_offchip_lds_buf =
2056 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2057 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2058 adev->gfx.cu_info.max_waves_per_simd =
2059 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2060 adev->gfx.cu_info.max_scratch_slots_per_cu =
2061 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2062 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2063 if (hdr->version_minor >= 1) {
2064 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2065 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2066 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2067 adev->gfx.config.num_sc_per_sh =
2068 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2069 adev->gfx.config.num_packer_per_sc =
2070 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2073 parse_soc_bounding_box:
2075 * soc bounding box info is not integrated in discovery table,
2076 * we always need to parse it from gpu info firmware if needed.
2078 if (hdr->version_minor == 2) {
2079 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2080 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2081 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2082 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2088 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2097 * amdgpu_device_ip_early_init - run early init for hardware IPs
2099 * @adev: amdgpu_device pointer
2101 * Early initialization pass for hardware IPs. The hardware IPs that make
2102 * up each asic are discovered and each IP's early_init callback is run. This
2103 * is the first stage in initializing the asic.
2104 * Returns 0 on success, negative error code on failure.
2106 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2108 struct drm_device *dev = adev_to_drm(adev);
2109 struct pci_dev *parent;
2112 amdgpu_device_enable_virtual_display(adev);
2114 if (amdgpu_sriov_vf(adev)) {
2115 r = amdgpu_virt_request_full_gpu(adev, true);
2120 switch (adev->asic_type) {
2121 #ifdef CONFIG_DRM_AMDGPU_SI
2127 adev->family = AMDGPU_FAMILY_SI;
2128 r = si_set_ip_blocks(adev);
2133 #ifdef CONFIG_DRM_AMDGPU_CIK
2139 if (adev->flags & AMD_IS_APU)
2140 adev->family = AMDGPU_FAMILY_KV;
2142 adev->family = AMDGPU_FAMILY_CI;
2144 r = cik_set_ip_blocks(adev);
2152 case CHIP_POLARIS10:
2153 case CHIP_POLARIS11:
2154 case CHIP_POLARIS12:
2158 if (adev->flags & AMD_IS_APU)
2159 adev->family = AMDGPU_FAMILY_CZ;
2161 adev->family = AMDGPU_FAMILY_VI;
2163 r = vi_set_ip_blocks(adev);
2168 r = amdgpu_discovery_set_ip_blocks(adev);
2174 if (amdgpu_has_atpx() &&
2175 (amdgpu_is_atpx_hybrid() ||
2176 amdgpu_has_atpx_dgpu_power_cntl()) &&
2177 ((adev->flags & AMD_IS_APU) == 0) &&
2178 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2179 adev->flags |= AMD_IS_PX;
2181 parent = pci_upstream_bridge(adev->pdev);
2182 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2184 amdgpu_amdkfd_device_probe(adev);
2186 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2187 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2188 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2189 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2190 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2192 for (i = 0; i < adev->num_ip_blocks; i++) {
2193 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2194 DRM_ERROR("disabled ip block: %d <%s>\n",
2195 i, adev->ip_blocks[i].version->funcs->name);
2196 adev->ip_blocks[i].status.valid = false;
2198 if (adev->ip_blocks[i].version->funcs->early_init) {
2199 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2201 adev->ip_blocks[i].status.valid = false;
2203 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2204 adev->ip_blocks[i].version->funcs->name, r);
2207 adev->ip_blocks[i].status.valid = true;
2210 adev->ip_blocks[i].status.valid = true;
2213 /* get the vbios after the asic_funcs are set up */
2214 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2215 r = amdgpu_device_parse_gpu_info_fw(adev);
2220 if (!amdgpu_get_bios(adev))
2223 r = amdgpu_atombios_init(adev);
2225 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2226 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2230 /* get pf2vf msg info at its earliest time */
2231 if (amdgpu_sriov_vf(adev))
2232 amdgpu_virt_init_data_exchange(adev);
2237 adev->cg_flags &= amdgpu_cg_mask;
2238 adev->pg_flags &= amdgpu_pg_mask;
2243 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2247 for (i = 0; i < adev->num_ip_blocks; i++) {
2248 if (!adev->ip_blocks[i].status.sw)
2250 if (adev->ip_blocks[i].status.hw)
2252 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2253 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2254 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2255 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2257 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2258 adev->ip_blocks[i].version->funcs->name, r);
2261 adev->ip_blocks[i].status.hw = true;
2268 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2272 for (i = 0; i < adev->num_ip_blocks; i++) {
2273 if (!adev->ip_blocks[i].status.sw)
2275 if (adev->ip_blocks[i].status.hw)
2277 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2279 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2280 adev->ip_blocks[i].version->funcs->name, r);
2283 adev->ip_blocks[i].status.hw = true;
2289 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2293 uint32_t smu_version;
2295 if (adev->asic_type >= CHIP_VEGA10) {
2296 for (i = 0; i < adev->num_ip_blocks; i++) {
2297 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2300 if (!adev->ip_blocks[i].status.sw)
2303 /* no need to do the fw loading again if already done */
2304 if (adev->ip_blocks[i].status.hw == true)
2307 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2308 r = adev->ip_blocks[i].version->funcs->resume(adev);
2310 DRM_ERROR("resume of IP block <%s> failed %d\n",
2311 adev->ip_blocks[i].version->funcs->name, r);
2315 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2317 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2318 adev->ip_blocks[i].version->funcs->name, r);
2323 adev->ip_blocks[i].status.hw = true;
2328 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2329 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2335 * amdgpu_device_ip_init - run init for hardware IPs
2337 * @adev: amdgpu_device pointer
2339 * Main initialization pass for hardware IPs. The list of all the hardware
2340 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2341 * are run. sw_init initializes the software state associated with each IP
2342 * and hw_init initializes the hardware associated with each IP.
2343 * Returns 0 on success, negative error code on failure.
2345 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2349 r = amdgpu_ras_init(adev);
2353 for (i = 0; i < adev->num_ip_blocks; i++) {
2354 if (!adev->ip_blocks[i].status.valid)
2356 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2358 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2359 adev->ip_blocks[i].version->funcs->name, r);
2362 adev->ip_blocks[i].status.sw = true;
2364 /* need to do gmc hw init early so we can allocate gpu mem */
2365 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2366 /* Try to reserve bad pages early */
2367 if (amdgpu_sriov_vf(adev))
2368 amdgpu_virt_exchange_data(adev);
2370 r = amdgpu_device_vram_scratch_init(adev);
2372 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2375 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2377 DRM_ERROR("hw_init %d failed %d\n", i, r);
2380 r = amdgpu_device_wb_init(adev);
2382 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2385 adev->ip_blocks[i].status.hw = true;
2387 /* right after GMC hw init, we create CSA */
2388 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                AMDGPU_CSA_SIZE);
                if (r) {
                    DRM_ERROR("allocate CSA failed %d\n", r);
                    return r;
                }
2400 if (amdgpu_sriov_vf(adev))
2401 amdgpu_virt_init_data_exchange(adev);
2403 r = amdgpu_ib_pool_init(adev);
2405 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2406 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2410 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2414 r = amdgpu_device_ip_hw_init_phase1(adev);
2418 r = amdgpu_device_fw_loading(adev);
2422 r = amdgpu_device_ip_hw_init_phase2(adev);
    /*
     * Retired pages will be loaded from eeprom and reserved here;
     * this should be called after amdgpu_device_ip_hw_init_phase2, since
     * for some ASICs the RAS EEPROM code relies on the SMU being fully
     * functional for I2C communication, which is only true at this point.
     *
     * amdgpu_ras_recovery_init may fail, but the caller only cares about
     * a failure caused by a bad GPU and stops the amdgpu init process
     * accordingly. For other failures it still releases all the resources
     * and prints an error message, rather than returning a negative value
     * to the upper level.
     *
     * Note: theoretically, this should be called before all vram allocations
     * to protect retired pages from being abused.
     */
2441 r = amdgpu_ras_recovery_init(adev);
2445 if (adev->gmc.xgmi.num_physical_nodes > 1)
2446 amdgpu_xgmi_add_device(adev);
2448 /* Don't init kfd if whole hive need to be reset during init */
2449 if (!adev->gmc.xgmi.pending_reset)
2450 amdgpu_amdkfd_device_init(adev);
2452 amdgpu_fru_get_product_info(adev);
2455 if (amdgpu_sriov_vf(adev))
2456 amdgpu_virt_release_full_gpu(adev, true);
2462 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2464 * @adev: amdgpu_device pointer
2466 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2467 * this function before a GPU reset. If the value is retained after a
 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2470 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2472 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2476 * amdgpu_device_check_vram_lost - check if vram is valid
2478 * @adev: amdgpu_device pointer
2480 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
2483 * returns true if vram is lost, false if not.
2485 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2487 if (memcmp(adev->gart.ptr, adev->reset_magic,
2488 AMDGPU_RESET_MAGIC_NUM))
2491 if (!amdgpu_in_reset(adev))
2495 * For all ASICs with baco/mode1 reset, the VRAM is
2496 * always assumed to be lost.
2498 switch (amdgpu_asic_reset_method(adev)) {
2499 case AMD_RESET_METHOD_BACO:
2500 case AMD_RESET_METHOD_MODE1:
2508 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2510 * @adev: amdgpu_device pointer
2511 * @state: clockgating state (gate or ungate)
2513 * The list of all the hardware IPs that make up the asic is walked and the
2514 * set_clockgating_state callbacks are run.
 * On the late initialization pass this enables clockgating for hardware IPs;
 * on the fini or suspend pass it disables clockgating for hardware IPs.
2517 * Returns 0 on success, negative error code on failure.
2520 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2521 enum amd_clockgating_state state)
2525 if (amdgpu_emu_mode == 1)
2528 for (j = 0; j < adev->num_ip_blocks; j++) {
2529 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2530 if (!adev->ip_blocks[i].status.late_initialized)
2532 /* skip CG for GFX on S0ix */
2533 if (adev->in_s0ix &&
2534 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2536 /* skip CG for VCE/UVD, it's handled specially */
2537 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2538 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2539 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2540 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2541 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2542 /* enable clockgating to save power */
2543 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2546 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2547 adev->ip_blocks[i].version->funcs->name, r);
2556 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2557 enum amd_powergating_state state)
2561 if (amdgpu_emu_mode == 1)
2564 for (j = 0; j < adev->num_ip_blocks; j++) {
2565 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2566 if (!adev->ip_blocks[i].status.late_initialized)
2568 /* skip PG for GFX on S0ix */
2569 if (adev->in_s0ix &&
2570 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
        /* skip PG for VCE/UVD, it's handled specially */
2573 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2574 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2575 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2576 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2577 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2578 /* enable powergating to save power */
2579 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2582 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2583 adev->ip_blocks[i].version->funcs->name, r);
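/*
 * amdgpu_device_enable_mgpu_fan_boost - enable the fan boost DPM feature on
 * every dGPU tracked in mgpu_info that does not have it enabled yet. Only
 * meaningful on systems with two or more dGPUs.
 */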
2591 static int amdgpu_device_enable_mgpu_fan_boost(void)
2593 struct amdgpu_gpu_instance *gpu_ins;
2594 struct amdgpu_device *adev;
2597 mutex_lock(&mgpu_info.mutex);
    /*
     * MGPU fan boost feature should be enabled
     * only when there are two or more dGPUs in
     * the system.
     */
    if (mgpu_info.num_dgpu < 2)
        goto out;
2607 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2608 gpu_ins = &(mgpu_info.gpu_ins[i]);
2609 adev = gpu_ins->adev;
2610 if (!(adev->flags & AMD_IS_APU) &&
2611 !gpu_ins->mgpu_fan_enabled) {
2612 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2616 gpu_ins->mgpu_fan_enabled = 1;
out:
    mutex_unlock(&mgpu_info.mutex);
2627 * amdgpu_device_ip_late_init - run late init for hardware IPs
2629 * @adev: amdgpu_device pointer
2631 * Late initialization pass for hardware IPs. The list of all the hardware
2632 * IPs that make up the asic is walked and the late_init callbacks are run.
2633 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized or something that needs to happen
2635 * late in the init process.
2636 * Returns 0 on success, negative error code on failure.
2638 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2640 struct amdgpu_gpu_instance *gpu_instance;
2643 for (i = 0; i < adev->num_ip_blocks; i++) {
2644 if (!adev->ip_blocks[i].status.hw)
2646 if (adev->ip_blocks[i].version->funcs->late_init) {
2647 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2649 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2650 adev->ip_blocks[i].version->funcs->name, r);
2654 adev->ip_blocks[i].status.late_initialized = true;
2657 r = amdgpu_ras_late_init(adev);
2659 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2663 amdgpu_ras_set_error_query_ready(adev, true);
2665 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2666 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2668 amdgpu_device_fill_reset_magic(adev);
2670 r = amdgpu_device_enable_mgpu_fan_boost();
2672 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
    /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
    if (amdgpu_passthrough(adev) &&
        ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
         adev->asic_type == CHIP_ALDEBARAN))
2677 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2679 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2680 mutex_lock(&mgpu_info.mutex);
        /*
         * Reset device p-state to low as this was booted with high.
         *
         * This should be performed only after all devices from the same
         * hive get initialized.
         *
         * However, the number of devices in a hive is not known in
         * advance; it is counted one by one during device initialization.
         *
         * So, we wait until all XGMI interlinked devices are initialized.
         * This may bring some delays as those devices may come from
         * different hives. But that should be OK.
         */
2695 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2696 for (i = 0; i < mgpu_info.num_gpu; i++) {
2697 gpu_instance = &(mgpu_info.gpu_ins[i]);
2698 if (gpu_instance->adev->flags & AMD_IS_APU)
2701 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2702 AMDGPU_XGMI_PSTATE_MIN);
2704 DRM_ERROR("pstate setting failed (%d).\n", r);
2710 mutex_unlock(&mgpu_info.mutex);
2717 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2719 * @adev: amdgpu_device pointer
 * For ASICs that need to disable the SMC first
2723 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
    if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
        return;
2730 for (i = 0; i < adev->num_ip_blocks; i++) {
2731 if (!adev->ip_blocks[i].status.hw)
2733 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2734 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2735 /* XXX handle errors */
2737 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2738 adev->ip_blocks[i].version->funcs->name, r);
2740 adev->ip_blocks[i].status.hw = false;
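/**
 * amdgpu_device_ip_fini_early - run early fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early teardown pass: runs the early_fini callbacks, ungates power and
 * clock, suspends KFD, and then runs hw_fini for all IP blocks in reverse
 * order, with the SMC handled first via amdgpu_device_smu_fini_early.
 */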
2746 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2750 for (i = 0; i < adev->num_ip_blocks; i++) {
2751 if (!adev->ip_blocks[i].version->funcs->early_fini)
2754 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2756 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2757 adev->ip_blocks[i].version->funcs->name, r);
2761 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2762 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2764 amdgpu_amdkfd_suspend(adev, false);
    /* Workaround for ASICs that need to disable the SMC first */
2767 amdgpu_device_smu_fini_early(adev);
2769 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2770 if (!adev->ip_blocks[i].status.hw)
2773 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2774 /* XXX handle errors */
2776 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2777 adev->ip_blocks[i].version->funcs->name, r);
2780 adev->ip_blocks[i].status.hw = false;
2783 if (amdgpu_sriov_vf(adev)) {
2784 if (amdgpu_virt_release_full_gpu(adev, false))
2785 DRM_ERROR("failed to release exclusive mode on fini\n");
2792 * amdgpu_device_ip_fini - run fini for hardware IPs
2794 * @adev: amdgpu_device pointer
2796 * Main teardown pass for hardware IPs. The list of all the hardware
2797 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2798 * are run. hw_fini tears down the hardware associated with each IP
2799 * and sw_fini tears down any software state associated with each IP.
2800 * Returns 0 on success, negative error code on failure.
2802 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2806 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2807 amdgpu_virt_release_ras_err_handler_data(adev);
2809 if (adev->gmc.xgmi.num_physical_nodes > 1)
2810 amdgpu_xgmi_remove_device(adev);
2812 amdgpu_amdkfd_device_fini_sw(adev);
2814 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2815 if (!adev->ip_blocks[i].status.sw)
2818 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2819 amdgpu_ucode_free_bo(adev);
2820 amdgpu_free_static_csa(&adev->virt.csa_obj);
2821 amdgpu_device_wb_fini(adev);
2822 amdgpu_device_vram_scratch_fini(adev);
2823 amdgpu_ib_pool_fini(adev);
2826 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2827 /* XXX handle errors */
2829 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2830 adev->ip_blocks[i].version->funcs->name, r);
2832 adev->ip_blocks[i].status.sw = false;
2833 adev->ip_blocks[i].status.valid = false;
2836 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2837 if (!adev->ip_blocks[i].status.late_initialized)
2839 if (adev->ip_blocks[i].version->funcs->late_fini)
2840 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2841 adev->ip_blocks[i].status.late_initialized = false;
2844 amdgpu_ras_fini(adev);
2850 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2852 * @work: work_struct.
2854 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2856 struct amdgpu_device *adev =
2857 container_of(work, struct amdgpu_device, delayed_init_work.work);
2860 r = amdgpu_ib_ring_tests(adev);
2862 DRM_ERROR("ib ring test failed (%d).\n", r);
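/**
 * amdgpu_device_delay_enable_gfx_off - delayed work handler enabling GFXOFF
 *
 * @work: work_struct.
 *
 * Runs once the GFXOFF delay expires with no new requests in flight and
 * asks the SMU to powergate the GFX block, recording the resulting state.
 */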
2865 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2867 struct amdgpu_device *adev =
2868 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2870 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2871 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2873 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2874 adev->gfx.gfx_off_state = true;
2878 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2880 * @adev: amdgpu_device pointer
2882 * Main suspend function for hardware IPs. The list of all the hardware
2883 * IPs that make up the asic is walked, clockgating is disabled and the
2884 * suspend callbacks are run. suspend puts the hardware and software state
2885 * in each IP into a state suitable for suspend.
2886 * Returns 0 on success, negative error code on failure.
2888 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2892 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2893 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2895 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2896 if (!adev->ip_blocks[i].status.valid)
2899 /* displays are handled separately */
2900 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
        /* XXX handle errors */
        r = adev->ip_blocks[i].version->funcs->suspend(adev);
2907 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2908 adev->ip_blocks[i].version->funcs->name, r);
2912 adev->ip_blocks[i].status.hw = false;
2919 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2921 * @adev: amdgpu_device pointer
2923 * Main suspend function for hardware IPs. The list of all the hardware
2924 * IPs that make up the asic is walked, clockgating is disabled and the
2925 * suspend callbacks are run. suspend puts the hardware and software state
2926 * in each IP into a state suitable for suspend.
2927 * Returns 0 on success, negative error code on failure.
2929 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2934 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2936 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2937 if (!adev->ip_blocks[i].status.valid)
2939 /* displays are handled in phase1 */
2940 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2942 /* PSP lost connection when err_event_athub occurs */
2943 if (amdgpu_ras_intr_triggered() &&
2944 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2945 adev->ip_blocks[i].status.hw = false;
        /* skip unnecessary suspend for IP blocks we have not initialized yet */
2950 if (adev->gmc.xgmi.pending_reset &&
2951 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2952 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2953 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2954 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2955 adev->ip_blocks[i].status.hw = false;
2959 /* skip suspend of gfx and psp for S0ix
2960 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2961 * like at runtime. PSP is also part of the always on hardware
2962 * so no need to suspend it.
2964 if (adev->in_s0ix &&
2965 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2966 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
        /* XXX handle errors */
        r = adev->ip_blocks[i].version->funcs->suspend(adev);
2973 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2974 adev->ip_blocks[i].version->funcs->name, r);
2976 adev->ip_blocks[i].status.hw = false;
2977 /* handle putting the SMC in the appropriate state */
        if (!amdgpu_sriov_vf(adev)) {
2979 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2980 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2982 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2983 adev->mp1_state, r);
2994 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2996 * @adev: amdgpu_device pointer
2998 * Main suspend function for hardware IPs. The list of all the hardware
2999 * IPs that make up the asic is walked, clockgating is disabled and the
3000 * suspend callbacks are run. suspend puts the hardware and software state
3001 * in each IP into a state suitable for suspend.
3002 * Returns 0 on success, negative error code on failure.
3004 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3008 if (amdgpu_sriov_vf(adev)) {
3009 amdgpu_virt_fini_data_exchange(adev);
3010 amdgpu_virt_request_full_gpu(adev, false);
3013 r = amdgpu_device_ip_suspend_phase1(adev);
3016 r = amdgpu_device_ip_suspend_phase2(adev);
3018 if (amdgpu_sriov_vf(adev))
3019 amdgpu_virt_release_full_gpu(adev, false);
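/*
 * amdgpu_device_ip_reinit_early_sriov - re-init the first IP blocks after a
 * VF FLR. Marks every block as not brought up, then runs hw_init again for
 * GMC, COMMON, PSP and IH in that fixed order, ahead of firmware loading.
 */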
3024 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3028 static enum amd_ip_block_type ip_order[] = {
3029 AMD_IP_BLOCK_TYPE_GMC,
3030 AMD_IP_BLOCK_TYPE_COMMON,
3031 AMD_IP_BLOCK_TYPE_PSP,
3032 AMD_IP_BLOCK_TYPE_IH,
3035 for (i = 0; i < adev->num_ip_blocks; i++) {
3037 struct amdgpu_ip_block *block;
3039 block = &adev->ip_blocks[i];
3040 block->status.hw = false;
3042 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
            if (block->version->type != ip_order[j] ||
                !block->status.valid)
                continue;
3048 r = block->version->funcs->hw_init(adev);
            DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3052 block->status.hw = true;
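/*
 * amdgpu_device_ip_reinit_late_sriov - re-init the remaining IP blocks after
 * a VF FLR, once the firmware has been reloaded. Walks the fixed SMC, DCE,
 * GFX, SDMA, UVD, VCE, VCN order; the SMC is resumed, the others get hw_init.
 */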
3059 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3063 static enum amd_ip_block_type ip_order[] = {
3064 AMD_IP_BLOCK_TYPE_SMC,
3065 AMD_IP_BLOCK_TYPE_DCE,
3066 AMD_IP_BLOCK_TYPE_GFX,
3067 AMD_IP_BLOCK_TYPE_SDMA,
3068 AMD_IP_BLOCK_TYPE_UVD,
3069 AMD_IP_BLOCK_TYPE_VCE,
3070 AMD_IP_BLOCK_TYPE_VCN
3073 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3075 struct amdgpu_ip_block *block;
3077 for (j = 0; j < adev->num_ip_blocks; j++) {
3078 block = &adev->ip_blocks[j];
            if (block->version->type != ip_order[i] ||
                !block->status.valid ||
                block->status.hw)
                continue;

            if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3086 r = block->version->funcs->resume(adev);
3088 r = block->version->funcs->hw_init(adev);
            DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3093 block->status.hw = true;
3101 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3103 * @adev: amdgpu_device pointer
3105 * First resume function for hardware IPs. The list of all the hardware
3106 * IPs that make up the asic is walked and the resume callbacks are run for
3107 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3108 * after a suspend and updates the software state as necessary. This
3109 * function is also used for restoring the GPU after a GPU reset.
3110 * Returns 0 on success, negative error code on failure.
3112 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3116 for (i = 0; i < adev->num_ip_blocks; i++) {
3117 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3119 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3120 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3121 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3123 r = adev->ip_blocks[i].version->funcs->resume(adev);
3125 DRM_ERROR("resume of IP block <%s> failed %d\n",
3126 adev->ip_blocks[i].version->funcs->name, r);
3129 adev->ip_blocks[i].status.hw = true;
3137 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3139 * @adev: amdgpu_device pointer
 * Second resume function for hardware IPs. The list of all the hardware
3142 * IPs that make up the asic is walked and the resume callbacks are run for
3143 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3144 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
3147 * Returns 0 on success, negative error code on failure.
3149 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3153 for (i = 0; i < adev->num_ip_blocks; i++) {
3154 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3156 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3157 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3158 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3159 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3161 r = adev->ip_blocks[i].version->funcs->resume(adev);
3163 DRM_ERROR("resume of IP block <%s> failed %d\n",
3164 adev->ip_blocks[i].version->funcs->name, r);
3167 adev->ip_blocks[i].status.hw = true;
3174 * amdgpu_device_ip_resume - run resume for hardware IPs
3176 * @adev: amdgpu_device pointer
3178 * Main resume function for hardware IPs. The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
3183 * Returns 0 on success, negative error code on failure.
3185 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3189 r = amdgpu_amdkfd_resume_iommu(adev);
3193 r = amdgpu_device_ip_resume_phase1(adev);
3197 r = amdgpu_device_fw_loading(adev);
3201 r = amdgpu_device_ip_resume_phase2(adev);
3207 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3209 * @adev: amdgpu_device pointer
3211 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3213 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3215 if (amdgpu_sriov_vf(adev)) {
3216 if (adev->is_atom_fw) {
3217 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3218 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3220 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3221 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3224 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3225 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3230 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3232 * @asic_type: AMD asic type
 * Check if there is DC (new modesetting infrastructure) support for an asic.
3235 * returns true if DC has support, false if not.
3237 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3239 switch (asic_type) {
3240 #ifdef CONFIG_DRM_AMDGPU_SI
3244 /* chips with no display hardware */
3246 #if defined(CONFIG_DRM_AMD_DC)
3252 * We have systems in the wild with these ASICs that require
3253 * LVDS and VGA support which is not supported with DC.
3255 * Fallback to the non-DC driver here by default so as not to
3256 * cause regressions.
3258 #if defined(CONFIG_DRM_AMD_DC_SI)
3259 return amdgpu_dc > 0;
3268 * We have systems in the wild with these ASICs that require
3269 * LVDS and VGA support which is not supported with DC.
3271 * Fallback to the non-DC driver here by default so as not to
3272 * cause regressions.
3274 return amdgpu_dc > 0;
3278 case CHIP_POLARIS10:
3279 case CHIP_POLARIS11:
3280 case CHIP_POLARIS12:
3287 #if defined(CONFIG_DRM_AMD_DC_DCN)
3293 case CHIP_CYAN_SKILLFISH:
3294 case CHIP_SIENNA_CICHLID:
3295 case CHIP_NAVY_FLOUNDER:
3296 case CHIP_DIMGREY_CAVEFISH:
3297 case CHIP_BEIGE_GOBY:
3299 case CHIP_YELLOW_CARP:
3302 return amdgpu_dc != 0;
3306 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3307 "but isn't supported by ASIC, ignoring\n");
3314 * amdgpu_device_has_dc_support - check if dc is supported
3316 * @adev: amdgpu_device pointer
3318 * Returns true for supported, false for not supported
3320 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3322 if (amdgpu_sriov_vf(adev) ||
3323 adev->enable_virtual_display ||
3324 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3327 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3330 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3332 struct amdgpu_device *adev =
3333 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3334 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3336 /* It's a bug to not have a hive within this function */
3341 * Use task barrier to synchronize all xgmi reset works across the
3342 * hive. task_barrier_enter and task_barrier_exit will block
3343 * until all the threads running the xgmi reset works reach
3344 * those points. task_barrier_full will do both blocks.
3346 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3348 task_barrier_enter(&hive->tb);
3349 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3351 if (adev->asic_reset_res)
3354 task_barrier_exit(&hive->tb);
3355 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3357 if (adev->asic_reset_res)
3360 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3361 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3362 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3365 task_barrier_full(&hive->tb);
3366 adev->asic_reset_res = amdgpu_asic_reset(adev);
3370 if (adev->asic_reset_res)
3371 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3372 adev->asic_reset_res, adev_to_drm(adev)->unique);
3373 amdgpu_put_xgmi_hive(hive);
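/**
 * amdgpu_device_get_job_timeout_settings - parse the lockup_timeout parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu_lockup_timeout module parameter, a comma separated list
 * of per-queue-type timeouts in ms applied in gfx, compute, sdma, video
 * order, e.g. "10000,60000,10000,10000". A single value applies to all
 * non-compute queues. Returns 0 on success, negative error code on a parse
 * failure.
 */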
3376 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3378 char *input = amdgpu_lockup_timeout;
    char *timeout_setting = NULL;
    int index = 0;
    long timeout;
    int ret = 0;
    /*
     * By default the timeout for non-compute jobs is 10000 ms
     * and 60000 ms for compute jobs.
     * In SR-IOV or passthrough mode, the timeout for compute
     * jobs is 60000 ms by default.
     */
3390 adev->gfx_timeout = msecs_to_jiffies(10000);
3391 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3392 if (amdgpu_sriov_vf(adev))
3393 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3394 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3396 adev->compute_timeout = msecs_to_jiffies(60000);
3398 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3399 while ((timeout_setting = strsep(&input, ",")) &&
3400 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
            ret = kstrtol(timeout_setting, 0, &timeout);
            if (ret)
                return ret;

            if (timeout == 0) {
                index++;
                continue;
            } else if (timeout < 0) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                dev_warn(adev->dev, "lockup timeout disabled");
                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
            } else {
                timeout = msecs_to_jiffies(timeout);
            }

            switch (index++) {
            case 0:
                adev->gfx_timeout = timeout;
                break;
            case 1:
                adev->compute_timeout = timeout;
                break;
            case 2:
                adev->sdma_timeout = timeout;
                break;
            case 3:
                adev->video_timeout = timeout;
                break;
            default:
                break;
            }
        }
        /*
         * There is only one value specified and
         * it should apply to all non-compute jobs.
         */
        if (index == 1) {
            adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
            if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
                adev->compute_timeout = adev->gfx_timeout;
        }
3448 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3450 * @adev: amdgpu_device pointer
 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode.
3454 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3456 struct iommu_domain *domain;
3458 domain = iommu_get_domain_for_dev(adev->dev);
3459 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3460 adev->ram_is_direct_mapped = true;
3463 static const struct attribute *amdgpu_dev_attributes[] = {
3464 &dev_attr_product_name.attr,
3465 &dev_attr_product_number.attr,
3466 &dev_attr_serial_number.attr,
3467 &dev_attr_pcie_replay_count.attr,
3472 * amdgpu_device_init - initialize the driver
3474 * @adev: amdgpu_device pointer
3475 * @flags: driver flags
3477 * Initializes the driver info and hw (all asics).
3478 * Returns 0 for success or an error on failure.
3479 * Called at driver startup.
3481 int amdgpu_device_init(struct amdgpu_device *adev,
3484 struct drm_device *ddev = adev_to_drm(adev);
3485 struct pci_dev *pdev = adev->pdev;
3490 adev->shutdown = false;
3491 adev->flags = flags;
3493 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3494 adev->asic_type = amdgpu_force_asic_type;
3496 adev->asic_type = flags & AMD_ASIC_MASK;
3498 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3499 if (amdgpu_emu_mode == 1)
3500 adev->usec_timeout *= 10;
3501 adev->gmc.gart_size = 512 * 1024 * 1024;
3502 adev->accel_working = false;
3503 adev->num_rings = 0;
3504 adev->mman.buffer_funcs = NULL;
3505 adev->mman.buffer_funcs_ring = NULL;
3506 adev->vm_manager.vm_pte_funcs = NULL;
3507 adev->vm_manager.vm_pte_num_scheds = 0;
3508 adev->gmc.gmc_funcs = NULL;
3509 adev->harvest_ip_mask = 0x0;
3510 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3511 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3513 adev->smc_rreg = &amdgpu_invalid_rreg;
3514 adev->smc_wreg = &amdgpu_invalid_wreg;
3515 adev->pcie_rreg = &amdgpu_invalid_rreg;
3516 adev->pcie_wreg = &amdgpu_invalid_wreg;
3517 adev->pciep_rreg = &amdgpu_invalid_rreg;
3518 adev->pciep_wreg = &amdgpu_invalid_wreg;
3519 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3520 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3521 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3522 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3523 adev->didt_rreg = &amdgpu_invalid_rreg;
3524 adev->didt_wreg = &amdgpu_invalid_wreg;
3525 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3526 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3527 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3528 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3530 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3531 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3532 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
    /* mutex initialization is all done here so we
     * can recall functions without having locking issues */
3536 mutex_init(&adev->firmware.mutex);
3537 mutex_init(&adev->pm.mutex);
3538 mutex_init(&adev->gfx.gpu_clock_mutex);
3539 mutex_init(&adev->srbm_mutex);
3540 mutex_init(&adev->gfx.pipe_reserve_mutex);
3541 mutex_init(&adev->gfx.gfx_off_mutex);
3542 mutex_init(&adev->grbm_idx_mutex);
3543 mutex_init(&adev->mn_lock);
3544 mutex_init(&adev->virt.vf_errors.lock);
3545 hash_init(adev->mn_hash);
3546 atomic_set(&adev->in_gpu_reset, 0);
3547 init_rwsem(&adev->reset_sem);
3548 mutex_init(&adev->psp.mutex);
3549 mutex_init(&adev->notifier_lock);
3550 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3551 mutex_init(&adev->benchmark_mutex);
3553 amdgpu_device_init_apu_flags(adev);
3555 r = amdgpu_device_check_arguments(adev);
3559 spin_lock_init(&adev->mmio_idx_lock);
3560 spin_lock_init(&adev->smc_idx_lock);
3561 spin_lock_init(&adev->pcie_idx_lock);
3562 spin_lock_init(&adev->uvd_ctx_idx_lock);
3563 spin_lock_init(&adev->didt_idx_lock);
3564 spin_lock_init(&adev->gc_cac_idx_lock);
3565 spin_lock_init(&adev->se_cac_idx_lock);
3566 spin_lock_init(&adev->audio_endpt_idx_lock);
3567 spin_lock_init(&adev->mm_stats.lock);
3569 INIT_LIST_HEAD(&adev->shadow_list);
3570 mutex_init(&adev->shadow_list_lock);
3572 INIT_LIST_HEAD(&adev->reset_list);
3574 INIT_LIST_HEAD(&adev->ras_list);
3576 INIT_DELAYED_WORK(&adev->delayed_init_work,
3577 amdgpu_device_delayed_init_work_handler);
3578 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3579 amdgpu_device_delay_enable_gfx_off);
3581 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3583 adev->gfx.gfx_off_req_count = 1;
3584 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3586 atomic_set(&adev->throttling_logging_enabled, 1);
3588 * If throttling continues, logging will be performed every minute
3589 * to avoid log flooding. "-1" is subtracted since the thermal
3590 * throttling interrupt comes every second. Thus, the total logging
 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3592 * for throttling interrupt) = 60 seconds.
3594 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3595 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3597 /* Registers mapping */
3598 /* TODO: block userspace mapping of io register */
3599 if (adev->asic_type >= CHIP_BONAIRE) {
3600 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3601 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3603 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3604 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3607 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3608 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3610 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3611 if (adev->rmmio == NULL) {
3614 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3615 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3617 amdgpu_device_get_pcie_info(adev);
    if (amdgpu_mcbp)
        DRM_INFO("MCBP is enabled\n");
3622 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3623 adev->enable_mes = true;
3625 /* detect hw virtualization here */
3626 amdgpu_detect_virtualization(adev);
3628 r = amdgpu_device_get_job_timeout_settings(adev);
3630 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3634 /* early init functions */
3635 r = amdgpu_device_ip_early_init(adev);
3639 /* Need to get xgmi info early to decide the reset behavior*/
3640 if (adev->gmc.xgmi.supported) {
3641 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3646 /* enable PCIE atomic ops */
3647 if (amdgpu_sriov_vf(adev))
3648 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3649 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3650 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3652 adev->have_atomics_support =
3653 !pci_enable_atomic_ops_to_root(adev->pdev,
3654 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3655 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3656 if (!adev->have_atomics_support)
3657 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3659 /* doorbell bar mapping and doorbell index init*/
3660 amdgpu_device_doorbell_init(adev);
3662 if (amdgpu_emu_mode == 1) {
3663 /* post the asic on emulation mode */
3664 emu_soc_asic_init(adev);
3665 goto fence_driver_init;
3668 amdgpu_reset_init(adev);
3670 /* detect if we are with an SRIOV vbios */
3671 amdgpu_device_detect_sriov_bios(adev);
3673 /* check if we need to reset the asic
3674 * E.g., driver was not cleanly unloaded previously, etc.
3676 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3677 if (adev->gmc.xgmi.num_physical_nodes) {
3678 dev_info(adev->dev, "Pending hive reset.\n");
3679 adev->gmc.xgmi.pending_reset = true;
3680 /* Only need to init necessary block for SMU to handle the reset */
3681 for (i = 0; i < adev->num_ip_blocks; i++) {
3682 if (!adev->ip_blocks[i].status.valid)
3684 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3685 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3686 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3687 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3688 DRM_DEBUG("IP %s disabled for hw_init.\n",
3689 adev->ip_blocks[i].version->funcs->name);
3690 adev->ip_blocks[i].status.hw = true;
3694 r = amdgpu_asic_reset(adev);
3696 dev_err(adev->dev, "asic reset on init failed\n");
3702 pci_enable_pcie_error_reporting(adev->pdev);
3704 /* Post card if necessary */
3705 if (amdgpu_device_need_post(adev)) {
3707 dev_err(adev->dev, "no vBIOS found\n");
3711 DRM_INFO("GPU posting now...\n");
3712 r = amdgpu_device_asic_init(adev);
3714 dev_err(adev->dev, "gpu post error!\n");
3719 if (adev->is_atom_fw) {
3720 /* Initialize clocks */
3721 r = amdgpu_atomfirmware_get_clock_info(adev);
3723 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3724 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3728 /* Initialize clocks */
3729 r = amdgpu_atombios_get_clock_info(adev);
3731 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3732 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3735 /* init i2c buses */
3736 if (!amdgpu_device_has_dc_support(adev))
3737 amdgpu_atombios_i2c_init(adev);
3742 r = amdgpu_fence_driver_sw_init(adev);
3744 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3745 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3749 /* init the mode config */
3750 drm_mode_config_init(adev_to_drm(adev));
3752 r = amdgpu_device_ip_init(adev);
3754 /* failed in exclusive mode due to timeout */
3755 if (amdgpu_sriov_vf(adev) &&
3756 !amdgpu_sriov_runtime(adev) &&
3757 amdgpu_virt_mmio_blocked(adev) &&
3758 !amdgpu_virt_wait_reset(adev)) {
3759 dev_err(adev->dev, "VF exclusive mode timeout\n");
3760 /* Don't send request since VF is inactive. */
3761 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3762 adev->virt.ops = NULL;
3764 goto release_ras_con;
3766 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3767 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3768 goto release_ras_con;
3771 amdgpu_fence_driver_hw_init(adev);
3774 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3775 adev->gfx.config.max_shader_engines,
3776 adev->gfx.config.max_sh_per_se,
3777 adev->gfx.config.max_cu_per_sh,
3778 adev->gfx.cu_info.number);
3780 adev->accel_working = true;
3782 amdgpu_vm_check_compute_bug(adev);
3784 /* Initialize the buffer migration limit. */
3785 if (amdgpu_moverate >= 0)
3786 max_MBps = amdgpu_moverate;
3788 max_MBps = 8; /* Allow 8 MB/s. */
3789 /* Get a log2 for easy divisions. */
3790 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3792 r = amdgpu_pm_sysfs_init(adev);
3794 adev->pm_sysfs_en = false;
3795 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3797 adev->pm_sysfs_en = true;
3799 r = amdgpu_ucode_sysfs_init(adev);
3801 adev->ucode_sysfs_en = false;
3802 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3804 adev->ucode_sysfs_en = true;
3806 if ((amdgpu_testing & 1)) {
3807 if (adev->accel_working)
3808 amdgpu_test_moves(adev);
3810 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
    /*
     * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
     * Otherwise the mgpu fan boost feature will be skipped because the
     * gpu instance count would be too low.
     */
3818 amdgpu_register_gpu_instance(adev);
3820 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3821 * explicit gating rather than handling it automatically.
3823 if (!adev->gmc.xgmi.pending_reset) {
3824 r = amdgpu_device_ip_late_init(adev);
3826 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3827 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3828 goto release_ras_con;
3831 amdgpu_ras_resume(adev);
3832 queue_delayed_work(system_wq, &adev->delayed_init_work,
3833 msecs_to_jiffies(AMDGPU_RESUME_MS));
3836 if (amdgpu_sriov_vf(adev))
3837 flush_delayed_work(&adev->delayed_init_work);
3839 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3841 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3843 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3844 r = amdgpu_pmu_init(adev);
3846 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3848 /* Have stored pci confspace at hand for restore in sudden PCI error */
3849 if (amdgpu_device_cache_pci_state(adev->pdev))
3850 pci_restore_state(pdev);
3852 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
    /* this will fail for cards that aren't VGA class devices, just
     * ignore it */
    if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3856 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3858 if (amdgpu_device_supports_px(ddev)) {
        px = true;
        vga_switcheroo_register_client(adev->pdev,
3861 &amdgpu_switcheroo_ops, px);
3862 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3865 if (adev->gmc.xgmi.pending_reset)
3866 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3867 msecs_to_jiffies(AMDGPU_RESUME_MS));
3869 amdgpu_device_check_iommu_direct_map(adev);
3874 amdgpu_release_ras_context(adev);
3877 amdgpu_vf_error_trans_all(adev);
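/*
 * amdgpu_device_unmap_mmio - tear down all CPU mappings of the device:
 * userspace mappings, doorbells, MMIO registers and the VRAM aperture,
 * plus the write-combine attributes set up for VRAM on dGPUs.
 */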
3882 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3885 /* Clear all CPU mappings pointing to this device */
3886 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3888 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3889 amdgpu_device_doorbell_fini(adev);
3891 iounmap(adev->rmmio);
3893 if (adev->mman.aper_base_kaddr)
3894 iounmap(adev->mman.aper_base_kaddr);
3895 adev->mman.aper_base_kaddr = NULL;
3897 /* Memory manager related */
3898 if (!adev->gmc.xgmi.connected_to_cpu) {
3899 arch_phys_wc_del(adev->gmc.vram_mtrr);
3900 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3905 * amdgpu_device_fini_hw - tear down the driver
3907 * @adev: amdgpu_device pointer
3909 * Tear down the driver info (all asics).
3910 * Called at driver shutdown.
3912 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3914 dev_info(adev->dev, "amdgpu: finishing device.\n");
3915 flush_delayed_work(&adev->delayed_init_work);
3916 if (adev->mman.initialized) {
3917 flush_delayed_work(&adev->mman.bdev.wq);
3918 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3920 adev->shutdown = true;
3922 /* make sure IB test finished before entering exclusive mode
3923 * to avoid preemption on IB test
3925 if (amdgpu_sriov_vf(adev)) {
3926 amdgpu_virt_request_full_gpu(adev, false);
3927 amdgpu_virt_fini_data_exchange(adev);
3930 /* disable all interrupts */
3931 amdgpu_irq_disable_all(adev);
3932 if (adev->mode_info.mode_config_initialized){
3933 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3934 drm_helper_force_disable_all(adev_to_drm(adev));
3936 drm_atomic_helper_shutdown(adev_to_drm(adev));
3938 amdgpu_fence_driver_hw_fini(adev);
3940 if (adev->pm_sysfs_en)
3941 amdgpu_pm_sysfs_fini(adev);
3942 if (adev->ucode_sysfs_en)
3943 amdgpu_ucode_sysfs_fini(adev);
3944 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
    /* ras features must be disabled before hw fini */
3947 amdgpu_ras_pre_fini(adev);
3949 amdgpu_device_ip_fini_early(adev);
3951 amdgpu_irq_fini_hw(adev);
3953 if (adev->mman.initialized)
3954 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3956 amdgpu_gart_dummy_page_fini(adev);
3958 if (drm_dev_is_unplugged(adev_to_drm(adev)))
3959 amdgpu_device_unmap_mmio(adev);
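/**
 * amdgpu_device_fini_sw - tear down driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Software teardown counterpart of amdgpu_device_fini_hw: runs the IP
 * sw_fini pass and releases firmware, i2c buses, atombios, switcheroo and
 * PMU state. Called at driver shutdown after the hw teardown.
 */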
3963 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3967 amdgpu_fence_driver_sw_fini(adev);
3968 amdgpu_device_ip_fini(adev);
3969 release_firmware(adev->firmware.gpu_info_fw);
3970 adev->firmware.gpu_info_fw = NULL;
3971 adev->accel_working = false;
3973 amdgpu_reset_fini(adev);
3975 /* free i2c buses */
3976 if (!amdgpu_device_has_dc_support(adev))
3977 amdgpu_i2c_fini(adev);
3979 if (amdgpu_emu_mode != 1)
3980 amdgpu_atombios_fini(adev);
3984 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3985 vga_switcheroo_unregister_client(adev->pdev);
3986 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3988 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3989 vga_client_unregister(adev->pdev);
3991 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
3993 iounmap(adev->rmmio);
3995 amdgpu_device_doorbell_fini(adev);
3999 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4000 amdgpu_pmu_fini(adev);
4001 if (adev->mman.discovery_bin)
4002 amdgpu_discovery_fini(adev);
4004 kfree(adev->pci_state);
4009 * amdgpu_device_evict_resources - evict device resources
4010 * @adev: amdgpu device object
 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
 * of the vram memory type. Mainly used for evicting device resources
 * at suspend time.
4017 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
4019 /* No need to evict vram on APUs for suspend to ram or s2idle */
4020 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4023 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
4024 DRM_WARN("evicting device resources failed\n");
4032 * amdgpu_device_suspend - initiate device suspend
4034 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
4037 * Puts the hw in the suspend state (all asics).
4038 * Returns 0 for success or an error on failure.
4039 * Called at driver suspend.
4041 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4043 struct amdgpu_device *adev = drm_to_adev(dev);
4045 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4048 adev->in_suspend = true;
4050 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4051 DRM_WARN("smart shift update failed\n");
4053 drm_kms_helper_poll_disable(dev);
4056 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4058 cancel_delayed_work_sync(&adev->delayed_init_work);
4060 amdgpu_ras_suspend(adev);
4062 amdgpu_device_ip_suspend_phase1(adev);
4065 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4067 amdgpu_device_evict_resources(adev);
4069 amdgpu_fence_driver_hw_fini(adev);
4071 amdgpu_device_ip_suspend_phase2(adev);
4077 * amdgpu_device_resume - initiate device resume
4079 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
4082 * Bring the hw back to operating state (all asics).
4083 * Returns 0 for success or an error on failure.
4084 * Called at driver resume.
4086 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4088 struct amdgpu_device *adev = drm_to_adev(dev);
4091 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4095 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4098 if (amdgpu_device_need_post(adev)) {
4099 r = amdgpu_device_asic_init(adev);
4101 dev_err(adev->dev, "amdgpu asic init failed\n");
4104 r = amdgpu_device_ip_resume(adev);
4106 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4109 amdgpu_fence_driver_hw_init(adev);
4111 r = amdgpu_device_ip_late_init(adev);
4115 queue_delayed_work(system_wq, &adev->delayed_init_work,
4116 msecs_to_jiffies(AMDGPU_RESUME_MS));
4118 if (!adev->in_s0ix) {
4119 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4124 /* Make sure IB tests flushed */
4125 flush_delayed_work(&adev->delayed_init_work);
4128 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4130 drm_kms_helper_poll_enable(dev);
4132 amdgpu_ras_resume(adev);
4135 * Most of the connector probing functions try to acquire runtime pm
4136 * refs to ensure that the GPU is powered on when connector polling is
4137 * performed. Since we're calling this from a runtime PM callback,
4138 * trying to acquire rpm refs will cause us to deadlock.
4140 * Since we're guaranteed to be holding the rpm lock, it's safe to
4141 * temporarily disable the rpm helpers so this doesn't deadlock us.
4144 dev->dev->power.disable_depth++;
4146 if (!amdgpu_device_has_dc_support(adev))
4147 drm_helper_hpd_irq_event(dev);
4149 drm_kms_helper_hotplug_event(dev);
4151 dev->dev->power.disable_depth--;
4153 adev->in_suspend = false;
4155 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4156 DRM_WARN("smart shift update failed\n");
4162 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4164 * @adev: amdgpu_device pointer
4166 * The list of all the hardware IPs that make up the asic is walked and
4167 * the check_soft_reset callbacks are run. check_soft_reset determines
4168 * if the asic is still hung or not.
4169 * Returns true if any of the IPs are still in a hung state, false if not.
4171 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4174 bool asic_hang = false;
    if (amdgpu_sriov_vf(adev))
        return true;

    if (amdgpu_asic_need_full_reset(adev))
        return true;
4182 for (i = 0; i < adev->num_ip_blocks; i++) {
4183 if (!adev->ip_blocks[i].status.valid)
4185 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4186 adev->ip_blocks[i].status.hang =
4187 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4188 if (adev->ip_blocks[i].status.hang) {
4189 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4197 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4199 * @adev: amdgpu_device pointer
4201 * The list of all the hardware IPs that make up the asic is walked and the
4202 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4203 * handles any IP specific hardware or software state changes that are
4204 * necessary for a soft reset to succeed.
4205 * Returns 0 on success, negative error code on failure.
4207 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4211 for (i = 0; i < adev->num_ip_blocks; i++) {
4212 if (!adev->ip_blocks[i].status.valid)
4214 if (adev->ip_blocks[i].status.hang &&
4215 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4216 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4226 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4228 * @adev: amdgpu_device pointer
4230 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4231 * reset is necessary to recover.
4232 * Returns true if a full asic reset is required, false if not.
4234 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
    if (amdgpu_asic_need_full_reset(adev))
        return true;
4241 for (i = 0; i < adev->num_ip_blocks; i++) {
4242 if (!adev->ip_blocks[i].status.valid)
4244 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4245 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4246 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4247 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4248 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4249 if (adev->ip_blocks[i].status.hang) {
                dev_info(adev->dev, "Some blocks need full reset!\n");
4259 * amdgpu_device_ip_soft_reset - do a soft reset
4261 * @adev: amdgpu_device pointer
4263 * The list of all the hardware IPs that make up the asic is walked and the
4264 * soft_reset callbacks are run if the block is hung. soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
4267 * Returns 0 on success, negative error code on failure.
4269 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4273 for (i = 0; i < adev->num_ip_blocks; i++) {
4274 if (!adev->ip_blocks[i].status.valid)
4276 if (adev->ip_blocks[i].status.hang &&
4277 adev->ip_blocks[i].version->funcs->soft_reset) {
4278 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4288 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4290 * @adev: amdgpu_device pointer
4292 * The list of all the hardware IPs that make up the asic is walked and the
4293 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4294 * handles any IP specific hardware or software state changes that are
4295 * necessary after the IP has been soft reset.
4296 * Returns 0 on success, negative error code on failure.
4298 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4302 for (i = 0; i < adev->num_ip_blocks; i++) {
4303 if (!adev->ip_blocks[i].status.valid)
4305 if (adev->ip_blocks[i].status.hang &&
4306 adev->ip_blocks[i].version->funcs->post_soft_reset)
4307 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4316 * amdgpu_device_recover_vram - Recover some VRAM contents
4318 * @adev: amdgpu_device pointer
4320 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4321 * restore things like GPUVM page tables after a GPU reset where
4322 * the contents of VRAM might be lost.
4325 * 0 on success, negative error code on failure.
4327 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4329 struct dma_fence *fence = NULL, *next = NULL;
4330 struct amdgpu_bo *shadow;
4331 struct amdgpu_bo_vm *vmbo;
4334 if (amdgpu_sriov_runtime(adev))
4335 tmo = msecs_to_jiffies(8000);
4337 tmo = msecs_to_jiffies(100);
4339 dev_info(adev->dev, "recover vram bo from shadow start\n");
4340 mutex_lock(&adev->shadow_list_lock);
    list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
        shadow = &vmbo->bo;
4343 /* No need to recover an evicted BO */
4344 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4345 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4346 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4349 r = amdgpu_bo_restore_shadow(shadow, &next);
4354 tmo = dma_fence_wait_timeout(fence, false, tmo);
4355 dma_fence_put(fence);
4360 } else if (tmo < 0) {
4368 mutex_unlock(&adev->shadow_list_lock);
4371 tmo = dma_fence_wait_timeout(fence, false, tmo);
4372 dma_fence_put(fence);
4374 if (r < 0 || tmo <= 0) {
4375 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4379 dev_info(adev->dev, "recover vram bo from shadow done\n");
4385 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4387 * @adev: amdgpu_device pointer
4388 * @from_hypervisor: request from hypervisor
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, otherwise an error code.
4393 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4394 bool from_hypervisor)
4397 struct amdgpu_hive_info *hive = NULL;
4398 int retry_limit = 0;
retry:
    amdgpu_amdkfd_pre_reset(adev);
4405 if (from_hypervisor)
4406 r = amdgpu_virt_request_full_gpu(adev, true);
4408 r = amdgpu_virt_reset_gpu(adev);
4412 /* Resume IP prior to SMC */
4413 r = amdgpu_device_ip_reinit_early_sriov(adev);
4417 amdgpu_virt_init_data_exchange(adev);
4419 r = amdgpu_device_fw_loading(adev);
4423 /* now we are okay to resume SMC/CP/SDMA */
4424 r = amdgpu_device_ip_reinit_late_sriov(adev);
4428 hive = amdgpu_get_xgmi_hive(adev);
4429 /* Update PSP FW topology after reset */
4430 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4431 r = amdgpu_xgmi_update_topology(hive, adev);
4434 amdgpu_put_xgmi_hive(hive);
4437 amdgpu_irq_gpu_reset_resume_helper(adev);
4438 r = amdgpu_ib_ring_tests(adev);
4439 amdgpu_amdkfd_post_reset(adev);
4443 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4444 amdgpu_inc_vram_lost(adev);
4445 r = amdgpu_device_recover_vram(adev);
4447 amdgpu_virt_release_full_gpu(adev, true);
4449 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
        if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
            retry_limit++;
            goto retry;
        } else
            DRM_ERROR("GPU reset retry is beyond the retry limit\n");
/**
 * amdgpu_device_has_job_running - check if there is any running job
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if any scheduler still has a job in its pending list.
 */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
	int i;
	struct drm_sched_job *job;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		spin_lock(&ring->sched.job_list_lock);
		job = list_first_entry_or_null(&ring->sched.pending_list,
					       struct drm_sched_job, list);
		spin_unlock(&ring->sched.job_list_lock);
		if (job)
			return true;
	}
	return false;
}
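/*
 * Usage sketch (hypothetical caller, for illustration only):
 * power-management paths can use this as a cheap "is the GPU busy?"
 * check before idling the device:
 *
 *	if (amdgpu_device_has_job_running(adev))
 *		return -EBUSY;	// don't idle under active jobs
 */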
/**
 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
 *
 * @adev: amdgpu_device pointer
 *
 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
 * the asic.
 * Returns true if we should try to recover, false if not.
 */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
	if (!amdgpu_device_ip_check_soft_reset(adev)) {
		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
		return false;
	}

	if (amdgpu_gpu_recovery == 0)
		goto disabled;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_gpu_recovery == -1) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
		case CHIP_VERDE:
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_OLAND:
		case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_KAVERI:
		case CHIP_KABINI:
		case CHIP_MULLINS:
#endif
		case CHIP_CARRIZO:
		case CHIP_STONEY:
		case CHIP_CYAN_SKILLFISH:
			goto disabled;
		default:
			break;
		}
	}

	return true;

disabled:
	dev_info(adev->dev, "GPU recovery disabled.\n");
	return false;
}
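/*
 * amdgpu_gpu_recovery module-parameter semantics as consumed in this
 * file: 0 disables recovery, -1 (auto) disables it on the legacy parts
 * listed above, 2 additionally enables the guilty-job recheck in
 * amdgpu_device_gpu_recover(), and any other positive value simply
 * allows recovery.
 */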
int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
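/*
 * The memsize poll above is the standard "is the ASIC back?" idiom:
 * while the device is still in reset, register reads return the PCIe
 * all-ones pattern, so any value other than 0xffffffff from
 * get_memsize() means the ASIC has come out of reset. The PCI slot
 * reset handler later in this file uses the same check via
 * amdgpu_asic_get_config_memsize().
 */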
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;
	struct amdgpu_job *job = NULL;
	bool need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

	if (reset_context->reset_req_dev == adev)
		job = reset_context->job;

	if (amdgpu_sriov_vf(adev)) {
		/* stop the data exchange thread */
		amdgpu_virt_fini_data_exchange(adev);
	}

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/*
		 * Clear job fences from the fence driver to avoid
		 * force_completion leaving NULL and vm flush fences
		 * in the fence driver.
		 */
		amdgpu_fence_driver_clear_job_fences(ring);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (job && job->vm)
		drm_sched_increase_karma(&job->base);

	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -ENOSYS)
		r = 0;
	else
		return r;

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
				need_full_reset = true;
			}
		}

		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);
		if (need_full_reset)
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
		else
			clear_bit(AMDGPU_NEED_FULL_RESET,
				  &reset_context->flags);
	}

	return r;
}
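/*
 * Both amdgpu_reset_prepare_hwcontext() above and
 * amdgpu_reset_perform_reset() below use -ENOSYS to mean "no dedicated
 * reset handler registered for this ASIC"; callers then fall through to
 * the default reset sequence:
 *
 *	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
 *	if (r == -ENOSYS)	// no handler: use the legacy path
 *		r = 0;
 *	else			// handler ran (or failed): we are done
 *		return r;
 */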
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset, skip_hw_reset, vram_lost = false;
	int r = 0;

	/* Try reset handler method first */
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
	/* If reset handler not implemented, continue; otherwise return */
	if (r == -ENOSYS)
		r = 0;
	else
		return r;

	/* Reset handler not implemented, use the default method */
	need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				tmp_adev->gmc.xgmi.pending_reset = false;
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else {
				r = amdgpu_asic_reset(tmp_adev);
			}

			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
					r, adev_to_drm(tmp_adev)->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceeding */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
		}

		amdgpu_ras_intr_cleared();
	}

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (need_full_reset) {
			/* post card */
			r = amdgpu_device_asic_init(tmp_adev);
			if (r) {
				dev_warn(tmp_adev->dev, "asic atom init failed!");
			} else {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
				if (r)
					goto out;

				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC as tracked, as the reset has
				 * already completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);

				/*
				 * The GPU enters a bad state once the number
				 * of faulty pages retired due to ECC errors
				 * reaches the threshold, and RAS recovery is
				 * scheduled next. So check here and break out
				 * of recovery if the bad page threshold is
				 * indeed exceeded, and remind the user to
				 * retire this GPU or set a bigger
				 * bad_page_threshold when probing the driver
				 * again.
				 */
				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}

				/* Update PSP FW topology after reset */
				if (reset_context->hive &&
				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(
						reset_context->hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	if (need_full_reset)
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	else
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
	return r;
}
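/*
 * Recovery-order sketch for the full-reset path above: ASIC reset (run
 * in parallel across an XGMI hive) -> amdgpu_device_asic_init() to
 * re-post the card -> IP resume phase 1 -> firmware loading -> IP
 * resume phase 2 -> late init -> amdgpu_device_recover_vram() to
 * restore shadowed VRAM buffers such as page tables.
 */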
static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive)
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	else
		down_write(&adev->reset_sem);

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}
/*
 * Lock a list of amdgpu devices in a hive safely. For a device that is
 * not part of a hive with multiple nodes, this behaves just like
 * amdgpu_device_lock_adev().
 *
 * Unlocking does not require a roll back.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;
roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the lock iteration broke in the middle of a hive,
		 * there may be a race, or a hive device may have locked
		 * up independently. We may or may not be in trouble, so
		 * roll back the locks taken so far and give out a
		 * warning.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head)
			amdgpu_device_unlock_adev(tmp_adev);
	}
	return -EAGAIN;
}
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}

	/* drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(p);
}
static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer audio issues when the audio device is not
	 * properly suspended first.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * fall back to a fixed 4s interval. The audio
		 * controller's default autosuspend delay is 3s, so 4s
		 * is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			pci_dev_put(p);
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));
	pci_dev_put(p);

	return 0;
}
static void amdgpu_device_recheck_guilty_jobs(
	struct amdgpu_device *adev, struct list_head *device_list_handle,
	struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		int ret = 0;
		struct drm_sched_job *s_job;

		if (!ring || !ring->sched.thread)
			continue;

		s_job = list_first_entry_or_null(&ring->sched.pending_list,
				struct drm_sched_job, list);
		if (s_job == NULL)
			continue;

		/* clear the job's guilty flag and let the following step decide the real one */
		drm_sched_reset_karma(s_job);
		/* the real bad job will be resubmitted twice, so take an extra
		 * dma_fence_get to keep the fence reference count balanced */
		dma_fence_get(s_job->s_fence->parent);
		drm_sched_resubmit_jobs_ext(&ring->sched, 1);

		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
		if (ret == 0) { /* timeout */
			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
				  ring->sched.name, s_job->id);

			/* set guilty */
			drm_sched_increase_karma(s_job);
retry:
			/* do hw reset */
			if (amdgpu_sriov_vf(adev)) {
				amdgpu_virt_fini_data_exchange(adev);
				r = amdgpu_device_reset_sriov(adev, false);
				if (r)
					adev->asic_reset_res = r;
			} else {
				clear_bit(AMDGPU_SKIP_HW_RESET,
					  &reset_context->flags);
				r = amdgpu_do_asic_reset(device_list_handle,
							 reset_context);
				if (r && r == -EAGAIN)
					goto retry;
			}

			/*
			 * bump the reset counter so that the following
			 * resubmitted jobs flush their vmid
			 */
			atomic_inc(&adev->gpu_reset_counter);
			continue;
		}

		/* got the hw fence, signal the finished fence */
		atomic_dec(ring->sched.score);
		dma_fence_put(s_job->s_fence->parent);
		dma_fence_get(&s_job->s_fence->finished);
		dma_fence_signal(&s_job->s_fence->finished);
		dma_fence_put(&s_job->s_fence->finished);

		/* remove the node from the list and free the job */
		spin_lock(&ring->sched.job_list_lock);
		list_del_init(&s_job->list);
		spin_unlock(&ring->sched.job_list_lock);
		ring->sched.ops->free_job(s_job);
	}
}
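/*
 * The trick above is drm_sched_resubmit_jobs_ext(&ring->sched, 1): it
 * resubmits only the first pending job per ring, so waiting on that
 * job's hardware fence with the scheduler timeout cleanly classifies it
 * as either innocent (the fence signals and the job is retired here) or
 * the real guilty job (the wait times out, karma is raised, and the
 * hardware is reset again).
 */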
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;
	int tmp_vram_lost_counter;
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop" : "reset");

	/*
	 * Here we trylock to avoid a chain of resets executing from
	 * either jobs triggering on different adevs in an XGMI hive or
	 * jobs on different schedulers for the same device while this
	 * TO handler is running. We always reset all schedulers for a
	 * device and all devices for an XGMI hive, so that should take
	 * care of them too.
	 */
	if (!amdgpu_sriov_vf(adev))
		hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job && job->vm)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}
	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.job = job;
	reset_context.hive = hive;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	/*
	 * Lock the device before we try to operate on the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);

		/* even when we skip this reset, the job still needs to be set guilty */
		if (job && job->vm)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}

	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset starts.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this, we may
		 * change the audio hardware behind the audio driver's
		 * back, which will trigger audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark these ASICs as untracked first, and add them
		 * back after reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}
retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
		/* TODO: should we stop? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
	/* Actual ASIC resets if needed. */
	/* Host driver will handle XGMI hive reset for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		/*
		 * Sometimes a later bad compute job can block a good gfx
		 * job as gfx and compute rings share internal GC HW
		 * mutually. We add an additional guilty-job recheck step
		 * to find the real guilty job: it synchronously resubmits
		 * the first pending job and waits for it to signal. If
		 * that wait times out, we identify it as the real guilty
		 * job.
		 */
		if (amdgpu_gpu_recovery == 2 &&
		    !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
			amdgpu_device_recheck_guilty_jobs(
				tmp_adev, device_list_handle, &reset_context);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* no point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

		if (tmp_adev->asic_reset_res)
			r = tmp_adev->asic_reset_res;

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SR-IOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/*
		 * kfd_post_reset will do nothing if kfd device is not
		 * initialized, so bring kfd up here if it was not
		 * initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}
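/*
 * Call-site sketch (the actual handler lives in amdgpu_job.c; shown
 * here for illustration only): the job-timeout (TDR) path is expected
 * to gate recovery on amdgpu_device_should_recover_gpu():
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		r = amdgpu_device_gpu_recover(ring->adev, job);
 */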
/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
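/*
 * Worked example: on a platform whose slot caps out at PCIe Gen3 x8,
 * the code above leaves pcie_gen_mask with the GEN1|GEN2|GEN3 bits set
 * (both the CAIL_ASIC_* and CAIL_* variants) and pcie_mlw_mask with
 * X8|X4|X2|X1, i.e. every speed and width up to and including the caps.
 */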
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}
static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}
/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Cancel and wait for all TDRs in progress if failing to
		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/*
	 * This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);
	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}
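/*
 * Background note: the HDP (Host Data Path) block caches CPU accesses
 * to VRAM made through the PCIe aperture, hence the flush helper above
 * and the invalidate helper below. APUs and XGMI parts connected to the
 * CPU access memory coherently, which is why both helpers return early
 * for them.
 */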
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU)
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}
/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. It will help to maintain error context when an error occurs.
 * Compared to a simple hang, the system will stay stable at least for SSH
 * access. Then it should be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs,
 *    etc.), clears all CPU mappings to the device, and disallows remappings
 *    through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
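/*
 * Both port accessors use the NBIO index/data register pair: write
 * reg * 4 (the register index converted to a byte offset) to the index
 * register, read it back to make sure the posted write has landed, then
 * access the data register, all under pcie_idx_lock. A read-modify-write
 * built on top would look like:
 *
 *	u32 v = amdgpu_device_pcie_port_rreg(adev, reg);
 *	amdgpu_device_pcie_port_wreg(adev, reg, v | mask);
 */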