 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>

#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
77 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
90 #define AMDGPU_RESUME_MS 2000
92 const char *amdgpu_asic_name[] = {
/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
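
/*
 * Usage sketch (illustrative, not part of the driver): the attribute above
 * appears under the PCI device's sysfs directory and can be read like any
 * other sysfs file from userspace C code. The card index in the path is an
 * assumption and depends on the system.
 *
 *	#include <stdio.h>
 *
 *	unsigned long long read_replay_count(void)
 *	{
 *		unsigned long long cnt = 0;
 *		FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *		if (f) {
 *			fscanf(f, "%llu", &cnt);
 *			fclose(f);
 *		}
 *		return cnt;
 *	}
 */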
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);
/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);
/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise returns false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device a dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}
/**
 * amdgpu_device_aper_access - access vram by the vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}
	}

	return count;
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
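
/*
 * Usage sketch (illustrative): reading a dword from the start of VRAM with
 * the helper above. The helper prefers the CPU-visible aperture and
 * transparently falls back to MM_INDEX/MM_DATA for any remainder.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, 0, &val, sizeof(val), false);
 */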
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_sem))
			up_read(&adev->reset_sem);
		else
			lockdep_assert_held(&adev->reset_sem);
	}
#endif
	return false;
}
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}
/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}
/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
/**
 * amdgpu_mm_wreg_mmio_rlc - write a register either with direct/indirect mmio or with the RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
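
/*
 * Usage sketch (illustrative): ring code typically notifies the hardware of
 * a new write pointer through these helpers; the index and value below are
 * assumptions for the sake of the example.
 *
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index, lower_32_bits(ring->wptr));
 */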
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
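
/*
 * Usage sketch (illustrative): ASIC code typically wraps these helpers in
 * its pcie_rreg/pcie_wreg callbacks. The register names below are
 * assumptions for the sake of the example.
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, mmPCIE_INDEX2,
 *						   mmPCIE_DATA2, reg);
 *	}
 */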
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}
/*
 * GPU doorbell aperture helpers function.
 */

/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * the max num_doorbells should add 1 page (0x400 dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256-bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		/* convert the slot index to a dword offset (each slot is 256 bits / 8 dwords) */
		*wb = offset << 3;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
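
/*
 * Usage sketch (illustrative): a caller that needs a status slot the GPU can
 * write to would pair the two helpers above like this (error handling
 * elided):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *
 *		... hand gpu_addr to the GPU, poll *cpu_ptr from the CPU ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */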
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is still not accessible after the resize we
 * abort driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
/*
 * GPU helpers function.
 */

/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed after a hw reset.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
/* if we get transitioned to only one device, take VGA back */
/*
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}
static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if (adev->pdev->device == 0x13FE)
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
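
/*
 * Usage sketch (illustrative, the version numbers are assumptions):
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 1) == 0) {
 *		... the GFX IP block is at least version 8.1 ...
 *	}
 */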
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
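
/*
 * Example (illustrative): based on the parsing above, the module parameter
 * takes semicolon-separated "<pci address>,<number of crtcs>" entries, with
 * "all" matching every device, e.g.:
 *
 *	amdgpu.virtual_display=0000:01:00.0,2
 *	amdgpu.virtual_display=all,1
 */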
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		amdgpu_discovery_get_gfx_info(adev);

		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGA20:
	case CHIP_ALDEBARAN:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_VANGOGH:
		chip_name = "vangogh";
		break;
	case CHIP_YELLOW_CARP:
		chip_name = "yellow_carp";
		break;
	}
1982 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1985 "Failed to load gpu_info firmware \"%s\"\n",
1989 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1992 "Failed to validate gpu_info firmware \"%s\"\n",
1997 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1998 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2000 switch (hdr->version_major) {
2003 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2004 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2005 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2008 * Should be droped when DAL no longer needs it.
2010 if (adev->asic_type == CHIP_NAVI12)
2011 goto parse_soc_bounding_box;
2013 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2014 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2015 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2016 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2017 adev->gfx.config.max_texture_channel_caches =
2018 le32_to_cpu(gpu_info_fw->gc_num_tccs);
2019 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2020 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2021 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2022 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2023 adev->gfx.config.double_offchip_lds_buf =
2024 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2025 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2026 adev->gfx.cu_info.max_waves_per_simd =
2027 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2028 adev->gfx.cu_info.max_scratch_slots_per_cu =
2029 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2030 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2031 if (hdr->version_minor >= 1) {
2032 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2033 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2034 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2035 adev->gfx.config.num_sc_per_sh =
2036 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2037 adev->gfx.config.num_packer_per_sc =
2038 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2041 parse_soc_bounding_box:
2043 * soc bounding box info is not integrated in disocovery table,
2044 * we always need to parse it from gpu info firmware if needed.
2046 if (hdr->version_minor == 2) {
2047 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2048 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2049 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2050 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2056 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered, and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;
	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		r = amdgpu_discovery_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	}

	amdgpu_amdkfd_device_probe(adev);

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (r)
				return r;

			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}

			/* get pf2vf msg info at its earliest time */
			if (amdgpu_sriov_vf(adev))
				amdgpu_virt_init_data_exchange(adev);
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (!adev->ip_blocks[i].status.sw)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (amdgpu_in_reset(adev) || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
2301 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2305 r = amdgpu_ras_init(adev);
2309 for (i = 0; i < adev->num_ip_blocks; i++) {
2310 if (!adev->ip_blocks[i].status.valid)
2312 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2314 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2315 adev->ip_blocks[i].version->funcs->name, r);
2318 adev->ip_blocks[i].status.sw = true;
2320 /* need to do gmc hw init early so we can allocate gpu mem */
2321 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2322 /* Try to reserve bad pages early */
2323 if (amdgpu_sriov_vf(adev))
2324 amdgpu_virt_exchange_data(adev);
2326 r = amdgpu_device_vram_scratch_init(adev);
2328 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2331 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2333 DRM_ERROR("hw_init %d failed %d\n", i, r);
2336 r = amdgpu_device_wb_init(adev);
2338 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2341 adev->ip_blocks[i].status.hw = true;
2343 /* right after GMC hw init, we create CSA */
2344 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2345 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2346 AMDGPU_GEM_DOMAIN_VRAM,
2349 DRM_ERROR("allocate CSA failed %d\n", r);
2356 if (amdgpu_sriov_vf(adev))
2357 amdgpu_virt_exchange_data(adev);
2359 r = amdgpu_ib_pool_init(adev);
2361 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2362 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2366 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2370 r = amdgpu_device_ip_hw_init_phase1(adev);
2374 r = amdgpu_device_fw_loading(adev);
2378 r = amdgpu_device_ip_hw_init_phase2(adev);
2383 * Retired pages will be loaded from eeprom and reserved here;
2384 * this must be called after amdgpu_device_ip_hw_init_phase2 since
2385 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2386 * functional for I2C communication, which is only true at this point.
2388 * amdgpu_ras_recovery_init may fail, but the upper level only cares
2389 * about failures caused by a bad GPU state and stops the amdgpu init
2390 * process accordingly. In other failure cases, it still releases all
2391 * the resources and prints an error message, rather than returning a
2392 * negative value to the upper level.
2394 * Note: theoretically, this should be called before all vram allocations
2395 * to protect retired pages from being misused.
2397 r = amdgpu_ras_recovery_init(adev);
2401 if (adev->gmc.xgmi.num_physical_nodes > 1)
2402 amdgpu_xgmi_add_device(adev);
2404 /* Don't init kfd if the whole hive needs to be reset during init */
2405 if (!adev->gmc.xgmi.pending_reset)
2406 amdgpu_amdkfd_device_init(adev);
2408 amdgpu_fru_get_product_info(adev);
2411 if (amdgpu_sriov_vf(adev))
2412 amdgpu_virt_release_full_gpu(adev, true);
2418 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2420 * @adev: amdgpu_device pointer
2422 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2423 * this function before a GPU reset. If the value is retained after a
2424 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2426 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2428 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2432 * amdgpu_device_check_vram_lost - check if vram is valid
2434 * @adev: amdgpu_device pointer
2436 * Checks the reset magic value written to the gart pointer in VRAM.
2437 * The driver calls this after a GPU reset to see if the contents of
2438 * VRAM have been lost or not.
2439 * Returns true if vram is lost, false if not.
2441 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2443 if (memcmp(adev->gart.ptr, adev->reset_magic,
2444 AMDGPU_RESET_MAGIC_NUM))
2447 if (!amdgpu_in_reset(adev))
2451 * For all ASICs with baco/mode1 reset, the VRAM is
2452 * always assumed to be lost.
2454 switch (amdgpu_asic_reset_method(adev)) {
2455 case AMD_RESET_METHOD_BACO:
2456 case AMD_RESET_METHOD_MODE1:
2464 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2466 * @adev: amdgpu_device pointer
2467 * @state: clockgating state (gate or ungate)
2469 * The list of all the hardware IPs that make up the asic is walked and the
2470 * set_clockgating_state callbacks are run.
2471 * The late init pass enables clockgating for hardware IPs;
2472 * the fini/suspend pass disables clockgating for hardware IPs.
2473 * Returns 0 on success, negative error code on failure.
2476 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2477 enum amd_clockgating_state state)
2481 if (amdgpu_emu_mode == 1)
2484 for (j = 0; j < adev->num_ip_blocks; j++) {
2485 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2486 if (!adev->ip_blocks[i].status.late_initialized)
2488 /* skip CG for GFX on S0ix */
2489 if (adev->in_s0ix &&
2490 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2492 /* skip CG for VCE/UVD, it's handled specially */
2493 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2494 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2495 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2496 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2497 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2498 /* enable clockgating to save power */
2499 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2502 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2503 adev->ip_blocks[i].version->funcs->name, r);
2512 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2513 enum amd_powergating_state state)
2517 if (amdgpu_emu_mode == 1)
2520 for (j = 0; j < adev->num_ip_blocks; j++) {
2521 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2522 if (!adev->ip_blocks[i].status.late_initialized)
2524 /* skip PG for GFX on S0ix */
2525 if (adev->in_s0ix &&
2526 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2528 /* skip PG for VCE/UVD, it's handled specially */
2529 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2530 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2531 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2532 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2533 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2534 /* enable powergating to save power */
2535 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2538 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2539 adev->ip_blocks[i].version->funcs->name, r);
2547 static int amdgpu_device_enable_mgpu_fan_boost(void)
2549 struct amdgpu_gpu_instance *gpu_ins;
2550 struct amdgpu_device *adev;
2553 mutex_lock(&mgpu_info.mutex);
2556 * MGPU fan boost feature should be enabled
2557 * only when there are two or more dGPUs in
2558 * the system.
2560 if (mgpu_info.num_dgpu < 2)
2563 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2564 gpu_ins = &(mgpu_info.gpu_ins[i]);
2565 adev = gpu_ins->adev;
2566 if (!(adev->flags & AMD_IS_APU) &&
2567 !gpu_ins->mgpu_fan_enabled) {
2568 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2572 gpu_ins->mgpu_fan_enabled = 1;
2577 mutex_unlock(&mgpu_info.mutex);
2583 * amdgpu_device_ip_late_init - run late init for hardware IPs
2585 * @adev: amdgpu_device pointer
2587 * Late initialization pass for hardware IPs. The list of all the hardware
2588 * IPs that make up the asic is walked and the late_init callbacks are run.
2589 * late_init covers any special initialization that an IP requires
2590 * after all of the IPs have been initialized or something that needs to happen
2591 * late in the init process.
2592 * Returns 0 on success, negative error code on failure.
2594 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2596 struct amdgpu_gpu_instance *gpu_instance;
2599 for (i = 0; i < adev->num_ip_blocks; i++) {
2600 if (!adev->ip_blocks[i].status.hw)
2602 if (adev->ip_blocks[i].version->funcs->late_init) {
2603 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2605 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2606 adev->ip_blocks[i].version->funcs->name, r);
2610 adev->ip_blocks[i].status.late_initialized = true;
2613 amdgpu_ras_set_error_query_ready(adev, true);
2615 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2616 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2618 amdgpu_device_fill_reset_magic(adev);
2620 r = amdgpu_device_enable_mgpu_fan_boost();
2622 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2624 /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
2625 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2626 adev->asic_type == CHIP_ALDEBARAN))
2627 smu_handle_passthrough_sbr(&adev->smu, true);
2629 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2630 mutex_lock(&mgpu_info.mutex);
2633 * Reset device p-state to low as this was booted with high.
2635 * This should be performed only after all devices from the same
2636 * hive get initialized.
2638 * However, the number of devices in the hive is not known in advance;
2639 * it is counted one by one as the devices initialize.
2641 * So we wait until all XGMI interlinked devices are initialized.
2642 * This may bring some delays as those devices may come from
2643 * different hives. But that should be OK.
2645 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2646 for (i = 0; i < mgpu_info.num_gpu; i++) {
2647 gpu_instance = &(mgpu_info.gpu_ins[i]);
2648 if (gpu_instance->adev->flags & AMD_IS_APU)
2651 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2652 AMDGPU_XGMI_PSTATE_MIN);
2654 DRM_ERROR("pstate setting failed (%d).\n", r);
2660 mutex_unlock(&mgpu_info.mutex);
2667 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2669 * @adev: amdgpu_device pointer
2671 * For ASICs that need to disable the SMC first
2673 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2677 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2680 for (i = 0; i < adev->num_ip_blocks; i++) {
2681 if (!adev->ip_blocks[i].status.hw)
2683 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2684 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2685 /* XXX handle errors */
2687 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2688 adev->ip_blocks[i].version->funcs->name, r);
2690 adev->ip_blocks[i].status.hw = false;
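/**
 * amdgpu_device_ip_fini_early - run early fini and hw_fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early teardown pass. Runs the early_fini callbacks, ungates power and
 * clocks, and then runs hw_fini for all IP blocks in reverse order (with
 * the SMC handled first as a workaround where required).
 * Returns 0 on success, negative error code on failure.
 */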
2696 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2700 for (i = 0; i < adev->num_ip_blocks; i++) {
2701 if (!adev->ip_blocks[i].version->funcs->early_fini)
2704 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2706 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2707 adev->ip_blocks[i].version->funcs->name, r);
2711 amdgpu_amdkfd_suspend(adev, false);
2713 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2714 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2716 /* Workaround for ASICs that need to disable the SMC first */
2717 amdgpu_device_smu_fini_early(adev);
2719 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2720 if (!adev->ip_blocks[i].status.hw)
2723 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2724 /* XXX handle errors */
2726 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2727 adev->ip_blocks[i].version->funcs->name, r);
2730 adev->ip_blocks[i].status.hw = false;
2733 if (amdgpu_sriov_vf(adev)) {
2734 if (amdgpu_virt_release_full_gpu(adev, false))
2735 DRM_ERROR("failed to release exclusive mode on fini\n");
2742 * amdgpu_device_ip_fini - run fini for hardware IPs
2744 * @adev: amdgpu_device pointer
2746 * Main teardown pass for hardware IPs. The list of all the hardware
2747 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2748 * are run. hw_fini tears down the hardware associated with each IP
2749 * and sw_fini tears down any software state associated with each IP.
2750 * Returns 0 on success, negative error code on failure.
2752 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2756 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2757 amdgpu_virt_release_ras_err_handler_data(adev);
2759 if (adev->gmc.xgmi.num_physical_nodes > 1)
2760 amdgpu_xgmi_remove_device(adev);
2762 amdgpu_amdkfd_device_fini_sw(adev);
2764 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2765 if (!adev->ip_blocks[i].status.sw)
2768 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2769 amdgpu_ucode_free_bo(adev);
2770 amdgpu_free_static_csa(&adev->virt.csa_obj);
2771 amdgpu_device_wb_fini(adev);
2772 amdgpu_device_vram_scratch_fini(adev);
2773 amdgpu_ib_pool_fini(adev);
2776 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2777 /* XXX handle errors */
2779 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2780 adev->ip_blocks[i].version->funcs->name, r);
2782 adev->ip_blocks[i].status.sw = false;
2783 adev->ip_blocks[i].status.valid = false;
2786 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2787 if (!adev->ip_blocks[i].status.late_initialized)
2789 if (adev->ip_blocks[i].version->funcs->late_fini)
2790 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2791 adev->ip_blocks[i].status.late_initialized = false;
2794 amdgpu_ras_fini(adev);
2800 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2802 * @work: work_struct.
2804 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2806 struct amdgpu_device *adev =
2807 container_of(work, struct amdgpu_device, delayed_init_work.work);
2810 r = amdgpu_ib_ring_tests(adev);
2812 DRM_ERROR("ib ring test failed (%d).\n", r);
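/**
 * amdgpu_device_delay_enable_gfx_off - delayed worker enabling GFXOFF
 *
 * @work: work_struct.
 *
 * Asks the SMU to powergate the GFX block once the delayed work fires,
 * i.e. after the GFXOFF request count has dropped to zero and stayed
 * there for the configured delay.
 */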
2815 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2817 struct amdgpu_device *adev =
2818 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2820 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2821 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2823 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2824 adev->gfx.gfx_off_state = true;
2828 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2830 * @adev: amdgpu_device pointer
2832 * Main suspend function for hardware IPs. The list of all the hardware
2833 * IPs that make up the asic is walked, clockgating is disabled and the
2834 * suspend callbacks are run. suspend puts the hardware and software state
2835 * in each IP into a state suitable for suspend.
2836 * Returns 0 on success, negative error code on failure.
2838 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2842 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2843 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2845 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2846 if (!adev->ip_blocks[i].status.valid)
2849 /* displays are handled separately */
2850 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2854 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2855 /* XXX handle errors */
2857 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2858 adev->ip_blocks[i].version->funcs->name, r);
2862 adev->ip_blocks[i].status.hw = false;
2869 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2871 * @adev: amdgpu_device pointer
2873 * Main suspend function for hardware IPs. The list of all the hardware
2874 * IPs that make up the asic is walked, clockgating is disabled and the
2875 * suspend callbacks are run. suspend puts the hardware and software state
2876 * in each IP into a state suitable for suspend.
2877 * Returns 0 on success, negative error code on failure.
2879 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2884 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2886 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2887 if (!adev->ip_blocks[i].status.valid)
2889 /* displays are handled in phase1 */
2890 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2892 /* PSP lost connection when err_event_athub occurs */
2893 if (amdgpu_ras_intr_triggered() &&
2894 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2895 adev->ip_blocks[i].status.hw = false;
2899 /* skip unnecessary suspend if we did not initialize them yet */
2900 if (adev->gmc.xgmi.pending_reset &&
2901 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2902 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2903 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2904 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2905 adev->ip_blocks[i].status.hw = false;
2909 /* skip suspend of gfx and psp for S0ix
2910 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2911 * like at runtime. PSP is also part of the always on hardware
2912 * so no need to suspend it.
2914 if (adev->in_s0ix &&
2915 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2916 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2920 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2921 /* XXX handle errors */
2923 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2924 adev->ip_blocks[i].version->funcs->name, r);
2926 adev->ip_blocks[i].status.hw = false;
2927 /* handle putting the SMC in the appropriate state */
2928 if (!amdgpu_sriov_vf(adev)) {
2929 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2930 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2932 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2933 adev->mp1_state, r);
2944 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2946 * @adev: amdgpu_device pointer
2948 * Main suspend function for hardware IPs. The list of all the hardware
2949 * IPs that make up the asic is walked, clockgating is disabled and the
2950 * suspend callbacks are run. suspend puts the hardware and software state
2951 * in each IP into a state suitable for suspend.
2952 * Returns 0 on success, negative error code on failure.
2954 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2958 if (amdgpu_sriov_vf(adev)) {
2959 amdgpu_virt_fini_data_exchange(adev);
2960 amdgpu_virt_request_full_gpu(adev, false);
2963 r = amdgpu_device_ip_suspend_phase1(adev);
2966 r = amdgpu_device_ip_suspend_phase2(adev);
2968 if (amdgpu_sriov_vf(adev))
2969 amdgpu_virt_release_full_gpu(adev, false);
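/*
 * Re-init the hardware IPs that must come up first after a VF FLR.
 * The blocks are brought up in the fixed order given by ip_order[]
 * below rather than in adev->ip_blocks order.
 */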
2974 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2978 static enum amd_ip_block_type ip_order[] = {
2979 AMD_IP_BLOCK_TYPE_GMC,
2980 AMD_IP_BLOCK_TYPE_COMMON,
2981 AMD_IP_BLOCK_TYPE_PSP,
2982 AMD_IP_BLOCK_TYPE_IH,
2985 for (i = 0; i < adev->num_ip_blocks; i++) {
2987 struct amdgpu_ip_block *block;
2989 block = &adev->ip_blocks[i];
2990 block->status.hw = false;
2992 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2994 if (block->version->type != ip_order[j] ||
2995 !block->status.valid)
2998 r = block->version->funcs->hw_init(adev);
2999 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3002 block->status.hw = true;
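/*
 * Re-init the remaining hardware IPs after a VF FLR, again in a fixed
 * order (SMC first, then display and the multimedia/compute engines).
 * The SMC block is resumed rather than re-initialized from scratch.
 */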
3009 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3013 static enum amd_ip_block_type ip_order[] = {
3014 AMD_IP_BLOCK_TYPE_SMC,
3015 AMD_IP_BLOCK_TYPE_DCE,
3016 AMD_IP_BLOCK_TYPE_GFX,
3017 AMD_IP_BLOCK_TYPE_SDMA,
3018 AMD_IP_BLOCK_TYPE_UVD,
3019 AMD_IP_BLOCK_TYPE_VCE,
3020 AMD_IP_BLOCK_TYPE_VCN
3023 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3025 struct amdgpu_ip_block *block;
3027 for (j = 0; j < adev->num_ip_blocks; j++) {
3028 block = &adev->ip_blocks[j];
3030 if (block->version->type != ip_order[i] ||
3031 !block->status.valid ||
3035 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3036 r = block->version->funcs->resume(adev);
3038 r = block->version->funcs->hw_init(adev);
3040 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3043 block->status.hw = true;
3051 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3053 * @adev: amdgpu_device pointer
3055 * First resume function for hardware IPs. The list of all the hardware
3056 * IPs that make up the asic is walked and the resume callbacks are run for
3057 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3058 * after a suspend and updates the software state as necessary. This
3059 * function is also used for restoring the GPU after a GPU reset.
3060 * Returns 0 on success, negative error code on failure.
3062 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3066 for (i = 0; i < adev->num_ip_blocks; i++) {
3067 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3069 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3070 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3071 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3073 r = adev->ip_blocks[i].version->funcs->resume(adev);
3075 DRM_ERROR("resume of IP block <%s> failed %d\n",
3076 adev->ip_blocks[i].version->funcs->name, r);
3079 adev->ip_blocks[i].status.hw = true;
3087 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3089 * @adev: amdgpu_device pointer
3091 * Second resume function for hardware IPs. The list of all the hardware
3092 * IPs that make up the asic is walked and the resume callbacks are run for
3093 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3094 * functional state after a suspend and updates the software state as
3095 * necessary. This function is also used for restoring the GPU after a GPU
3096 * reset.
3097 * Returns 0 on success, negative error code on failure.
3099 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3103 for (i = 0; i < adev->num_ip_blocks; i++) {
3104 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3106 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3107 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3108 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3109 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3111 r = adev->ip_blocks[i].version->funcs->resume(adev);
3113 DRM_ERROR("resume of IP block <%s> failed %d\n",
3114 adev->ip_blocks[i].version->funcs->name, r);
3117 adev->ip_blocks[i].status.hw = true;
3124 * amdgpu_device_ip_resume - run resume for hardware IPs
3126 * @adev: amdgpu_device pointer
3128 * Main resume function for hardware IPs. The hardware IPs
3129 * are split into two resume functions because they are
3130 * also used in recovering from a GPU reset and some additional
3131 * steps need to be taken between them. In this case (S3/S4) they are
3132 * run sequentially.
3133 * Returns 0 on success, negative error code on failure.
3135 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3139 r = amdgpu_amdkfd_resume_iommu(adev);
3143 r = amdgpu_device_ip_resume_phase1(adev);
3147 r = amdgpu_device_fw_loading(adev);
3151 r = amdgpu_device_ip_resume_phase2(adev);
3157 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3159 * @adev: amdgpu_device pointer
3161 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3163 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3165 if (amdgpu_sriov_vf(adev)) {
3166 if (adev->is_atom_fw) {
3167 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3168 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3170 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3171 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3174 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3175 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3180 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3182 * @asic_type: AMD asic type
3184 * Check if there is DC (new modesetting infrastructure) support for an asic.
3185 * Returns true if DC has support, false if not.
3187 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3189 switch (asic_type) {
3190 #ifdef CONFIG_DRM_AMDGPU_SI
3194 /* chips with no display hardware */
3196 #if defined(CONFIG_DRM_AMD_DC)
3202 * We have systems in the wild with these ASICs that require
3203 * LVDS and VGA support which is not supported with DC.
3205 * Fallback to the non-DC driver here by default so as not to
3206 * cause regressions.
3208 #if defined(CONFIG_DRM_AMD_DC_SI)
3209 return amdgpu_dc > 0;
3218 * We have systems in the wild with these ASICs that require
3219 * LVDS and VGA support which is not supported with DC.
3221 * Fallback to the non-DC driver here by default so as not to
3222 * cause regressions.
3224 return amdgpu_dc > 0;
3228 case CHIP_POLARIS10:
3229 case CHIP_POLARIS11:
3230 case CHIP_POLARIS12:
3237 #if defined(CONFIG_DRM_AMD_DC_DCN)
3243 case CHIP_CYAN_SKILLFISH:
3244 case CHIP_SIENNA_CICHLID:
3245 case CHIP_NAVY_FLOUNDER:
3246 case CHIP_DIMGREY_CAVEFISH:
3247 case CHIP_BEIGE_GOBY:
3249 case CHIP_YELLOW_CARP:
3252 return amdgpu_dc != 0;
3256 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3257 "but isn't supported by ASIC, ignoring\n");
3264 * amdgpu_device_has_dc_support - check if dc is supported
3266 * @adev: amdgpu_device pointer
3268 * Returns true for supported, false for not supported
3270 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3272 if (amdgpu_sriov_vf(adev) ||
3273 adev->enable_virtual_display ||
3274 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3277 return amdgpu_device_asic_has_dc_support(adev->asic_type);
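/**
 * amdgpu_device_xgmi_reset_func - reset worker for one device in an XGMI hive
 *
 * @__work: work_struct.
 *
 * Resets the device as part of a hive-wide reset, using the hive's task
 * barrier to keep the BACO enter/exit (or the full asic reset) in lock
 * step with the other nodes in the hive.
 */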
3280 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3282 struct amdgpu_device *adev =
3283 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3284 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3286 /* It's a bug to not have a hive within this function */
3291 * Use task barrier to synchronize all xgmi reset works across the
3292 * hive. task_barrier_enter and task_barrier_exit will block
3293 * until all the threads running the xgmi reset works reach
3294 * those points. task_barrier_full will do both blocks.
3296 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3298 task_barrier_enter(&hive->tb);
3299 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3301 if (adev->asic_reset_res)
3304 task_barrier_exit(&hive->tb);
3305 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3307 if (adev->asic_reset_res)
3310 if (adev->mmhub.ras_funcs &&
3311 adev->mmhub.ras_funcs->reset_ras_error_count)
3312 adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3315 task_barrier_full(&hive->tb);
3316 adev->asic_reset_res = amdgpu_asic_reset(adev);
3320 if (adev->asic_reset_res)
3321 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3322 adev->asic_reset_res, adev_to_drm(adev)->unique);
3323 amdgpu_put_xgmi_hive(hive);
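/**
 * amdgpu_device_get_job_timeout_settings - parse the lockup_timeout parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the comma separated values of the amdgpu_lockup_timeout module
 * parameter and applies them, in order, to the gfx, compute, sdma and
 * video job timeouts. A single value applies to all non-compute jobs.
 * Returns 0 on success, negative error code on a malformed value.
 */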
3326 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3328 char *input = amdgpu_lockup_timeout;
3329 char *timeout_setting = NULL;
3335 * By default the timeout for non-compute jobs is 10000 ms
3336 * and 60000 ms for compute jobs.
3337 * In SR-IOV or passthrough mode, the timeout for compute
3338 * jobs is 60000 ms by default.
3340 adev->gfx_timeout = msecs_to_jiffies(10000);
3341 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3342 if (amdgpu_sriov_vf(adev))
3343 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3344 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3346 adev->compute_timeout = msecs_to_jiffies(60000);
3348 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3349 while ((timeout_setting = strsep(&input, ",")) &&
3350 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3351 ret = kstrtol(timeout_setting, 0, &timeout);
3358 } else if (timeout < 0) {
3359 timeout = MAX_SCHEDULE_TIMEOUT;
3360 dev_warn(adev->dev, "lockup timeout disabled");
3361 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3363 timeout = msecs_to_jiffies(timeout);
3368 adev->gfx_timeout = timeout;
3371 adev->compute_timeout = timeout;
3374 adev->sdma_timeout = timeout;
3377 adev->video_timeout = timeout;
3384 * There is only one value specified and
3385 * it should apply to all non-compute jobs.
3388 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3389 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3390 adev->compute_timeout = adev->gfx_timeout;
3398 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3400 * @adev: amdgpu_device pointer
3402 * RAM is direct-mapped to the GPU if the IOMMU is not enabled, or is in passthrough mode
3404 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3406 struct iommu_domain *domain;
3408 domain = iommu_get_domain_for_dev(adev->dev);
3409 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3410 adev->ram_is_direct_mapped = true;
3413 static const struct attribute *amdgpu_dev_attributes[] = {
3414 &dev_attr_product_name.attr,
3415 &dev_attr_product_number.attr,
3416 &dev_attr_serial_number.attr,
3417 &dev_attr_pcie_replay_count.attr,
3422 * amdgpu_device_init - initialize the driver
3424 * @adev: amdgpu_device pointer
3425 * @flags: driver flags
3427 * Initializes the driver info and hw (all asics).
3428 * Returns 0 for success or an error on failure.
3429 * Called at driver startup.
3431 int amdgpu_device_init(struct amdgpu_device *adev,
3434 struct drm_device *ddev = adev_to_drm(adev);
3435 struct pci_dev *pdev = adev->pdev;
3440 adev->shutdown = false;
3441 adev->flags = flags;
3443 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3444 adev->asic_type = amdgpu_force_asic_type;
3446 adev->asic_type = flags & AMD_ASIC_MASK;
3448 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3449 if (amdgpu_emu_mode == 1)
3450 adev->usec_timeout *= 10;
3451 adev->gmc.gart_size = 512 * 1024 * 1024;
3452 adev->accel_working = false;
3453 adev->num_rings = 0;
3454 adev->mman.buffer_funcs = NULL;
3455 adev->mman.buffer_funcs_ring = NULL;
3456 adev->vm_manager.vm_pte_funcs = NULL;
3457 adev->vm_manager.vm_pte_num_scheds = 0;
3458 adev->gmc.gmc_funcs = NULL;
3459 adev->harvest_ip_mask = 0x0;
3460 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3461 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3463 adev->smc_rreg = &amdgpu_invalid_rreg;
3464 adev->smc_wreg = &amdgpu_invalid_wreg;
3465 adev->pcie_rreg = &amdgpu_invalid_rreg;
3466 adev->pcie_wreg = &amdgpu_invalid_wreg;
3467 adev->pciep_rreg = &amdgpu_invalid_rreg;
3468 adev->pciep_wreg = &amdgpu_invalid_wreg;
3469 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3470 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3471 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3472 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3473 adev->didt_rreg = &amdgpu_invalid_rreg;
3474 adev->didt_wreg = &amdgpu_invalid_wreg;
3475 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3476 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3477 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3478 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3480 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3481 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3482 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3484 /* mutex initializations are all done here so we
3485 * can recall functions without having locking issues */
3486 mutex_init(&adev->firmware.mutex);
3487 mutex_init(&adev->pm.mutex);
3488 mutex_init(&adev->gfx.gpu_clock_mutex);
3489 mutex_init(&adev->srbm_mutex);
3490 mutex_init(&adev->gfx.pipe_reserve_mutex);
3491 mutex_init(&adev->gfx.gfx_off_mutex);
3492 mutex_init(&adev->grbm_idx_mutex);
3493 mutex_init(&adev->mn_lock);
3494 mutex_init(&adev->virt.vf_errors.lock);
3495 hash_init(adev->mn_hash);
3496 atomic_set(&adev->in_gpu_reset, 0);
3497 init_rwsem(&adev->reset_sem);
3498 mutex_init(&adev->psp.mutex);
3499 mutex_init(&adev->notifier_lock);
3501 amdgpu_device_init_apu_flags(adev);
3503 r = amdgpu_device_check_arguments(adev);
3507 spin_lock_init(&adev->mmio_idx_lock);
3508 spin_lock_init(&adev->smc_idx_lock);
3509 spin_lock_init(&adev->pcie_idx_lock);
3510 spin_lock_init(&adev->uvd_ctx_idx_lock);
3511 spin_lock_init(&adev->didt_idx_lock);
3512 spin_lock_init(&adev->gc_cac_idx_lock);
3513 spin_lock_init(&adev->se_cac_idx_lock);
3514 spin_lock_init(&adev->audio_endpt_idx_lock);
3515 spin_lock_init(&adev->mm_stats.lock);
3517 INIT_LIST_HEAD(&adev->shadow_list);
3518 mutex_init(&adev->shadow_list_lock);
3520 INIT_LIST_HEAD(&adev->reset_list);
3522 INIT_DELAYED_WORK(&adev->delayed_init_work,
3523 amdgpu_device_delayed_init_work_handler);
3524 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3525 amdgpu_device_delay_enable_gfx_off);
3527 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3529 adev->gfx.gfx_off_req_count = 1;
3530 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3532 atomic_set(&adev->throttling_logging_enabled, 1);
3534 * If throttling continues, logging will be performed every minute
3535 * to avoid log flooding. "-1" is subtracted since the thermal
3536 * throttling interrupt comes every second. Thus, the total logging
3537 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3538 * for throttling interrupt) = 60 seconds.
3540 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3541 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3543 /* Registers mapping */
3544 /* TODO: block userspace mapping of io register */
3545 if (adev->asic_type >= CHIP_BONAIRE) {
3546 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3547 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3549 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3550 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3553 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3554 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3556 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3557 if (adev->rmmio == NULL) {
3560 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3561 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3563 amdgpu_device_get_pcie_info(adev);
3566 DRM_INFO("MCBP is enabled\n");
3568 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3569 adev->enable_mes = true;
3571 /* detect hw virtualization here */
3572 amdgpu_detect_virtualization(adev);
3574 r = amdgpu_device_get_job_timeout_settings(adev);
3576 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3580 /* early init functions */
3581 r = amdgpu_device_ip_early_init(adev);
3585 /* Need to get xgmi info early to decide the reset behavior*/
3586 if (adev->gmc.xgmi.supported) {
3587 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3592 /* enable PCIE atomic ops */
3593 if (amdgpu_sriov_vf(adev))
3594 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3595 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3596 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3598 adev->have_atomics_support =
3599 !pci_enable_atomic_ops_to_root(adev->pdev,
3600 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3601 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3602 if (!adev->have_atomics_support)
3603 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3605 /* doorbell bar mapping and doorbell index init */
3606 amdgpu_device_doorbell_init(adev);
3608 if (amdgpu_emu_mode == 1) {
3609 /* post the asic on emulation mode */
3610 emu_soc_asic_init(adev);
3611 goto fence_driver_init;
3614 amdgpu_reset_init(adev);
3616 /* detect if we are with an SRIOV vbios */
3617 amdgpu_device_detect_sriov_bios(adev);
3619 /* check if we need to reset the asic
3620 * E.g., driver was not cleanly unloaded previously, etc.
3622 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3623 if (adev->gmc.xgmi.num_physical_nodes) {
3624 dev_info(adev->dev, "Pending hive reset.\n");
3625 adev->gmc.xgmi.pending_reset = true;
3626 /* Only need to init the necessary blocks for SMU to handle the reset */
3627 for (i = 0; i < adev->num_ip_blocks; i++) {
3628 if (!adev->ip_blocks[i].status.valid)
3630 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3631 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3632 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3633 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3634 DRM_DEBUG("IP %s disabled for hw_init.\n",
3635 adev->ip_blocks[i].version->funcs->name);
3636 adev->ip_blocks[i].status.hw = true;
3640 r = amdgpu_asic_reset(adev);
3642 dev_err(adev->dev, "asic reset on init failed\n");
3648 pci_enable_pcie_error_reporting(adev->pdev);
3650 /* Post card if necessary */
3651 if (amdgpu_device_need_post(adev)) {
3653 dev_err(adev->dev, "no vBIOS found\n");
3657 DRM_INFO("GPU posting now...\n");
3658 r = amdgpu_device_asic_init(adev);
3660 dev_err(adev->dev, "gpu post error!\n");
3665 if (adev->is_atom_fw) {
3666 /* Initialize clocks */
3667 r = amdgpu_atomfirmware_get_clock_info(adev);
3669 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3670 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3674 /* Initialize clocks */
3675 r = amdgpu_atombios_get_clock_info(adev);
3677 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3678 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3681 /* init i2c buses */
3682 if (!amdgpu_device_has_dc_support(adev))
3683 amdgpu_atombios_i2c_init(adev);
3688 r = amdgpu_fence_driver_sw_init(adev);
3690 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3691 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3695 /* init the mode config */
3696 drm_mode_config_init(adev_to_drm(adev));
3698 r = amdgpu_device_ip_init(adev);
3700 /* failed in exclusive mode due to timeout */
3701 if (amdgpu_sriov_vf(adev) &&
3702 !amdgpu_sriov_runtime(adev) &&
3703 amdgpu_virt_mmio_blocked(adev) &&
3704 !amdgpu_virt_wait_reset(adev)) {
3705 dev_err(adev->dev, "VF exclusive mode timeout\n");
3706 /* Don't send request since VF is inactive. */
3707 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3708 adev->virt.ops = NULL;
3710 goto release_ras_con;
3712 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3713 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3714 goto release_ras_con;
3717 amdgpu_fence_driver_hw_init(adev);
3720 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3721 adev->gfx.config.max_shader_engines,
3722 adev->gfx.config.max_sh_per_se,
3723 adev->gfx.config.max_cu_per_sh,
3724 adev->gfx.cu_info.number);
3726 adev->accel_working = true;
3728 amdgpu_vm_check_compute_bug(adev);
3730 /* Initialize the buffer migration limit. */
3731 if (amdgpu_moverate >= 0)
3732 max_MBps = amdgpu_moverate;
3734 max_MBps = 8; /* Allow 8 MB/s. */
3735 /* Get a log2 for easy divisions. */
3736 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3738 r = amdgpu_pm_sysfs_init(adev);
3740 adev->pm_sysfs_en = false;
3741 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3743 adev->pm_sysfs_en = true;
3745 r = amdgpu_ucode_sysfs_init(adev);
3747 adev->ucode_sysfs_en = false;
3748 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3750 adev->ucode_sysfs_en = true;
3752 if ((amdgpu_testing & 1)) {
3753 if (adev->accel_working)
3754 amdgpu_test_moves(adev);
3756 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3758 if (amdgpu_benchmarking) {
3759 if (adev->accel_working)
3760 amdgpu_benchmark(adev, amdgpu_benchmarking);
3762 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3766 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3767 * Otherwise the mgpu fan boost feature will be skipped because the
3768 * gpu instance count would be too low.
3770 amdgpu_register_gpu_instance(adev);
3772 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3773 * explicit gating rather than handling it automatically.
3775 if (!adev->gmc.xgmi.pending_reset) {
3776 r = amdgpu_device_ip_late_init(adev);
3778 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3779 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3780 goto release_ras_con;
3783 amdgpu_ras_resume(adev);
3784 queue_delayed_work(system_wq, &adev->delayed_init_work,
3785 msecs_to_jiffies(AMDGPU_RESUME_MS));
3788 if (amdgpu_sriov_vf(adev))
3789 flush_delayed_work(&adev->delayed_init_work);
3791 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3793 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3795 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3796 r = amdgpu_pmu_init(adev);
3798 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3800 /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3801 if (amdgpu_device_cache_pci_state(adev->pdev))
3802 pci_restore_state(pdev);
3804 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3805 /* this will fail for cards that aren't VGA class devices, just
3806 * ignore it */
3807 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3808 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3810 if (amdgpu_device_supports_px(ddev)) {
3812 vga_switcheroo_register_client(adev->pdev,
3813 &amdgpu_switcheroo_ops, px);
3814 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3817 if (adev->gmc.xgmi.pending_reset)
3818 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3819 msecs_to_jiffies(AMDGPU_RESUME_MS));
3821 amdgpu_device_check_iommu_direct_map(adev);
3826 amdgpu_release_ras_context(adev);
3829 amdgpu_vf_error_trans_all(adev);
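/*
 * Unmap all CPU-visible mappings of the device (user mappings, doorbell,
 * registers and the VRAM aperture) ahead of the device going away, e.g.
 * on hot-unplug.
 */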
3834 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3837 /* Clear all CPU mappings pointing to this device */
3838 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3840 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3841 amdgpu_device_doorbell_fini(adev);
3843 iounmap(adev->rmmio);
3845 if (adev->mman.aper_base_kaddr)
3846 iounmap(adev->mman.aper_base_kaddr);
3847 adev->mman.aper_base_kaddr = NULL;
3849 /* Memory manager related */
3850 if (!adev->gmc.xgmi.connected_to_cpu) {
3851 arch_phys_wc_del(adev->gmc.vram_mtrr);
3852 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3857 * amdgpu_device_fini_hw - tear down the driver
3859 * @adev: amdgpu_device pointer
3861 * Tear down the driver info (all asics).
3862 * Called at driver shutdown.
3864 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3866 dev_info(adev->dev, "amdgpu: finishing device.\n");
3867 flush_delayed_work(&adev->delayed_init_work);
3868 if (adev->mman.initialized) {
3869 flush_delayed_work(&adev->mman.bdev.wq);
3870 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3872 adev->shutdown = true;
3874 /* make sure IB tests have finished before entering exclusive mode
3875 * to avoid preemption on IB tests
3877 if (amdgpu_sriov_vf(adev)) {
3878 amdgpu_virt_request_full_gpu(adev, false);
3879 amdgpu_virt_fini_data_exchange(adev);
3882 /* disable all interrupts */
3883 amdgpu_irq_disable_all(adev);
3884 if (adev->mode_info.mode_config_initialized) {
3885 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3886 drm_helper_force_disable_all(adev_to_drm(adev));
3888 drm_atomic_helper_shutdown(adev_to_drm(adev));
3890 amdgpu_fence_driver_hw_fini(adev);
3892 if (adev->pm_sysfs_en)
3893 amdgpu_pm_sysfs_fini(adev);
3894 if (adev->ucode_sysfs_en)
3895 amdgpu_ucode_sysfs_fini(adev);
3896 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3898 /* RAS features must be disabled before hw fini */
3899 amdgpu_ras_pre_fini(adev);
3901 amdgpu_device_ip_fini_early(adev);
3903 amdgpu_irq_fini_hw(adev);
3905 if (adev->mman.initialized)
3906 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3908 amdgpu_gart_dummy_page_fini(adev);
3910 if (drm_dev_is_unplugged(adev_to_drm(adev)))
3911 amdgpu_device_unmap_mmio(adev);
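/**
 * amdgpu_device_fini_sw - tear down the driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Software teardown counterpart of amdgpu_device_fini_hw: runs the IP
 * sw_fini pass and releases firmware, i2c buses, vga clients and the
 * remaining mappings. Called at driver shutdown after the hw teardown.
 */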
3915 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3919 amdgpu_fence_driver_sw_fini(adev);
3920 amdgpu_device_ip_fini(adev);
3921 release_firmware(adev->firmware.gpu_info_fw);
3922 adev->firmware.gpu_info_fw = NULL;
3923 adev->accel_working = false;
3925 amdgpu_reset_fini(adev);
3927 /* free i2c buses */
3928 if (!amdgpu_device_has_dc_support(adev))
3929 amdgpu_i2c_fini(adev);
3931 if (amdgpu_emu_mode != 1)
3932 amdgpu_atombios_fini(adev);
3936 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3937 vga_switcheroo_unregister_client(adev->pdev);
3938 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3940 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3941 vga_client_unregister(adev->pdev);
3943 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
3945 iounmap(adev->rmmio);
3947 amdgpu_device_doorbell_fini(adev);
3951 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3952 amdgpu_pmu_fini(adev);
3953 if (adev->mman.discovery_bin)
3954 amdgpu_discovery_fini(adev);
3956 kfree(adev->pci_state);
3961 * amdgpu_device_evict_resources - evict device resources
3962 * @adev: amdgpu device object
3964 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
3965 * of the vram memory type. Mainly used for evicting device resources
3966 * at suspend time.
3969 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
3971 /* No need to evict vram on APUs for suspend to ram or s2idle */
3972 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
3975 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
3976 DRM_WARN("evicting device resources failed\n");
3984 * amdgpu_device_suspend - initiate device suspend
3986 * @dev: drm dev pointer
3987 * @fbcon: notify the fbdev of suspend
3989 * Puts the hw in the suspend state (all asics).
3990 * Returns 0 for success or an error on failure.
3991 * Called at driver suspend.
3993 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3995 struct amdgpu_device *adev = drm_to_adev(dev);
3997 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4000 adev->in_suspend = true;
4002 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4003 DRM_WARN("smart shift update failed\n");
4005 drm_kms_helper_poll_disable(dev);
4008 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4010 cancel_delayed_work_sync(&adev->delayed_init_work);
4012 amdgpu_ras_suspend(adev);
4014 amdgpu_device_ip_suspend_phase1(adev);
4017 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4019 amdgpu_device_evict_resources(adev);
4021 amdgpu_fence_driver_hw_fini(adev);
4023 amdgpu_device_ip_suspend_phase2(adev);
4029 * amdgpu_device_resume - initiate device resume
4031 * @dev: drm dev pointer
4032 * @fbcon: notify the fbdev of resume
4034 * Bring the hw back to operating state (all asics).
4035 * Returns 0 for success or an error on failure.
4036 * Called at driver resume.
4038 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4040 struct amdgpu_device *adev = drm_to_adev(dev);
4043 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4047 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
4050 if (amdgpu_device_need_post(adev)) {
4051 r = amdgpu_device_asic_init(adev);
4053 dev_err(adev->dev, "amdgpu asic init failed\n");
4056 r = amdgpu_device_ip_resume(adev);
4058 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4061 amdgpu_fence_driver_hw_init(adev);
4063 r = amdgpu_device_ip_late_init(adev);
4067 queue_delayed_work(system_wq, &adev->delayed_init_work,
4068 msecs_to_jiffies(AMDGPU_RESUME_MS));
4070 if (!adev->in_s0ix) {
4071 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4076 /* Make sure IB tests flushed */
4077 flush_delayed_work(&adev->delayed_init_work);
4080 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4082 drm_kms_helper_poll_enable(dev);
4084 amdgpu_ras_resume(adev);
4087 * Most of the connector probing functions try to acquire runtime pm
4088 * refs to ensure that the GPU is powered on when connector polling is
4089 * performed. Since we're calling this from a runtime PM callback,
4090 * trying to acquire rpm refs will cause us to deadlock.
4092 * Since we're guaranteed to be holding the rpm lock, it's safe to
4093 * temporarily disable the rpm helpers so this doesn't deadlock us.
4096 dev->dev->power.disable_depth++;
4098 if (!amdgpu_device_has_dc_support(adev))
4099 drm_helper_hpd_irq_event(dev);
4101 drm_kms_helper_hotplug_event(dev);
4103 dev->dev->power.disable_depth--;
4105 adev->in_suspend = false;
4107 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4108 DRM_WARN("smart shift update failed\n");
4114 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4116 * @adev: amdgpu_device pointer
4118 * The list of all the hardware IPs that make up the asic is walked and
4119 * the check_soft_reset callbacks are run. check_soft_reset determines
4120 * if the asic is still hung or not.
4121 * Returns true if any of the IPs are still in a hung state, false if not.
4123 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4126 bool asic_hang = false;
4128 if (amdgpu_sriov_vf(adev))
4131 if (amdgpu_asic_need_full_reset(adev))
4134 for (i = 0; i < adev->num_ip_blocks; i++) {
4135 if (!adev->ip_blocks[i].status.valid)
4137 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4138 adev->ip_blocks[i].status.hang =
4139 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4140 if (adev->ip_blocks[i].status.hang) {
4141 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4149 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4151 * @adev: amdgpu_device pointer
4153 * The list of all the hardware IPs that make up the asic is walked and the
4154 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4155 * handles any IP specific hardware or software state changes that are
4156 * necessary for a soft reset to succeed.
4157 * Returns 0 on success, negative error code on failure.
4159 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4163 for (i = 0; i < adev->num_ip_blocks; i++) {
4164 if (!adev->ip_blocks[i].status.valid)
4166 if (adev->ip_blocks[i].status.hang &&
4167 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4168 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4178 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4180 * @adev: amdgpu_device pointer
4182 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4183 * reset is necessary to recover.
4184 * Returns true if a full asic reset is required, false if not.
4186 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4190 if (amdgpu_asic_need_full_reset(adev))
4193 for (i = 0; i < adev->num_ip_blocks; i++) {
4194 if (!adev->ip_blocks[i].status.valid)
4196 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4197 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4198 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4199 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4200 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4201 if (adev->ip_blocks[i].status.hang) {
4202 dev_info(adev->dev, "Some blocks need full reset!\n");
4211 * amdgpu_device_ip_soft_reset - do a soft reset
4213 * @adev: amdgpu_device pointer
4215 * The list of all the hardware IPs that make up the asic is walked and the
4216 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4217 * IP specific hardware or software state changes that are necessary to soft
4218 * reset the IP.
4219 * Returns 0 on success, negative error code on failure.
4221 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4225 for (i = 0; i < adev->num_ip_blocks; i++) {
4226 if (!adev->ip_blocks[i].status.valid)
4228 if (adev->ip_blocks[i].status.hang &&
4229 adev->ip_blocks[i].version->funcs->soft_reset) {
4230 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4240 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4242 * @adev: amdgpu_device pointer
4244 * The list of all the hardware IPs that make up the asic is walked and the
4245 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4246 * handles any IP specific hardware or software state changes that are
4247 * necessary after the IP has been soft reset.
4248 * Returns 0 on success, negative error code on failure.
4250 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4254 for (i = 0; i < adev->num_ip_blocks; i++) {
4255 if (!adev->ip_blocks[i].status.valid)
4257 if (adev->ip_blocks[i].status.hang &&
4258 adev->ip_blocks[i].version->funcs->post_soft_reset)
4259 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4268 * amdgpu_device_recover_vram - Recover some VRAM contents
4270 * @adev: amdgpu_device pointer
4272 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4273 * restore things like GPUVM page tables after a GPU reset where
4274 * the contents of VRAM might be lost.
4277 * 0 on success, negative error code on failure.
4279 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4281 struct dma_fence *fence = NULL, *next = NULL;
4282 struct amdgpu_bo *shadow;
4283 struct amdgpu_bo_vm *vmbo;
4286 if (amdgpu_sriov_runtime(adev))
4287 tmo = msecs_to_jiffies(8000);
4289 tmo = msecs_to_jiffies(100);
4291 dev_info(adev->dev, "recover vram bo from shadow start\n");
4292 mutex_lock(&adev->shadow_list_lock);
4293 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4295 /* No need to recover an evicted BO */
4296 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4297 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4298 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4301 r = amdgpu_bo_restore_shadow(shadow, &next);
4306 tmo = dma_fence_wait_timeout(fence, false, tmo);
4307 dma_fence_put(fence);
4312 } else if (tmo < 0) {
4320 mutex_unlock(&adev->shadow_list_lock);
4323 tmo = dma_fence_wait_timeout(fence, false, tmo);
4324 dma_fence_put(fence);
4326 if (r < 0 || tmo <= 0) {
4327 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4331 dev_info(adev->dev, "recover vram bo from shadow done\n");
4337 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4339 * @adev: amdgpu_device pointer
4340 * @from_hypervisor: request from hypervisor
4342 * Do a VF FLR and reinitialize the ASIC.
4343 * Returns 0 on success, otherwise a negative error code.
4345 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4346 bool from_hypervisor)
4349 struct amdgpu_hive_info *hive = NULL;
4351 amdgpu_amdkfd_pre_reset(adev);
4355 if (from_hypervisor)
4356 r = amdgpu_virt_request_full_gpu(adev, true);
4358 r = amdgpu_virt_reset_gpu(adev);
4362 /* Resume IP prior to SMC */
4363 r = amdgpu_device_ip_reinit_early_sriov(adev);
4367 amdgpu_virt_init_data_exchange(adev);
4369 r = amdgpu_device_fw_loading(adev);
4373 /* now we are okay to resume SMC/CP/SDMA */
4374 r = amdgpu_device_ip_reinit_late_sriov(adev);
4378 hive = amdgpu_get_xgmi_hive(adev);
4379 /* Update PSP FW topology after reset */
4380 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4381 r = amdgpu_xgmi_update_topology(hive, adev);
4384 amdgpu_put_xgmi_hive(hive);
4387 amdgpu_irq_gpu_reset_resume_helper(adev);
4388 r = amdgpu_ib_ring_tests(adev);
4389 amdgpu_amdkfd_post_reset(adev);
4393 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4394 amdgpu_inc_vram_lost(adev);
4395 r = amdgpu_device_recover_vram(adev);
4397 amdgpu_virt_release_full_gpu(adev, true);
4403 * amdgpu_device_has_job_running - check if there is any job in mirror list
4405 * @adev: amdgpu_device pointer
4407 * Check if there is any job in the mirror list.
4409 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4412 struct drm_sched_job *job;
4414 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4415 struct amdgpu_ring *ring = adev->rings[i];
4417 if (!ring || !ring->sched.thread)
4420 spin_lock(&ring->sched.job_list_lock);
4421 job = list_first_entry_or_null(&ring->sched.pending_list,
4422 struct drm_sched_job, list);
4423 spin_unlock(&ring->sched.job_list_lock);
4431 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4433 * @adev: amdgpu_device pointer
4435 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4436 * the asic to a functional state again.
4438 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4440 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4441 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4445 if (amdgpu_gpu_recovery == 0)
4448 if (amdgpu_sriov_vf(adev))
4451 if (amdgpu_gpu_recovery == -1) {
4452 switch (adev->asic_type) {
4458 case CHIP_POLARIS10:
4459 case CHIP_POLARIS11:
4460 case CHIP_POLARIS12:
4471 case CHIP_SIENNA_CICHLID:
4472 case CHIP_NAVY_FLOUNDER:
4473 case CHIP_DIMGREY_CAVEFISH:
4474 case CHIP_BEIGE_GOBY:
4476 case CHIP_ALDEBARAN:
4486 dev_info(adev->dev, "GPU recovery disabled.\n");
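/**
 * amdgpu_device_mode1_reset - perform a mode1 (whole asic) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Caches the PCI state, triggers a mode1 reset through the SMU when
 * supported (PSP otherwise), restores the PCI state and then waits for
 * the asic to come back by polling the memory size register.
 * Returns 0 on success, negative error code on failure.
 */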
4490 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4495 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4497 dev_info(adev->dev, "GPU mode1 reset\n");
4500 pci_clear_master(adev->pdev);
4502 amdgpu_device_cache_pci_state(adev->pdev);
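/* Prefer the SMU-backed mode1 reset when supported; otherwise use the PSP path */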
4504 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4505 dev_info(adev->dev, "GPU smu mode1 reset\n");
4506 ret = amdgpu_dpm_mode1_reset(adev);
4508 dev_info(adev->dev, "GPU psp mode1 reset\n");
4509 ret = psp_gpu_reset(adev);
4513 dev_err(adev->dev, "GPU mode1 reset failed\n");
4515 amdgpu_device_load_pci_state(adev->pdev);
4517 /* wait for asic to come out of reset */
4518 for (i = 0; i < adev->usec_timeout; i++) {
4519 u32 memsize = adev->nbio.funcs->get_memsize(adev);
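/* memsize reads back as 0xffffffff while the ASIC is still in reset */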
4521 if (memsize != 0xffffffff)
4526 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4530 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4531 struct amdgpu_reset_context *reset_context)
4534 struct amdgpu_job *job = NULL;
4535 bool need_full_reset =
4536 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4538 if (reset_context->reset_req_dev == adev)
4539 job = reset_context->job;
4541 if (amdgpu_sriov_vf(adev)) {
4542 /* stop the data exchange thread */
4543 amdgpu_virt_fini_data_exchange(adev);
4546 /* block all schedulers and reset given job's ring */
4547 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4548 struct amdgpu_ring *ring = adev->rings[i];
4550 if (!ring || !ring->sched.thread)
4553 /* Clear job fences from the fence driver so that force_completion
4554  * won't signal them; leave the NULL and VM flush fences in the fence driver */
4555 amdgpu_fence_driver_clear_job_fences(ring);
4557 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4558 amdgpu_fence_driver_force_completion(ring);
4562 drm_sched_increase_karma(&job->base);
4564 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4565 /* If reset handler not implemented, continue; otherwise return */
4571 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4572 if (!amdgpu_sriov_vf(adev)) {
4574 if (!need_full_reset)
4575 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
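/* Try a per-IP soft reset first; escalate to a full reset if it fails */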
4577 if (!need_full_reset) {
4578 amdgpu_device_ip_pre_soft_reset(adev);
4579 r = amdgpu_device_ip_soft_reset(adev);
4580 amdgpu_device_ip_post_soft_reset(adev);
4581 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4582 dev_info(adev->dev, "soft reset failed, will fall back to full reset!\n");
4583 need_full_reset = true;
4587 if (need_full_reset)
4588 r = amdgpu_device_ip_suspend(adev);
4589 if (need_full_reset)
4590 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4592 clear_bit(AMDGPU_NEED_FULL_RESET,
4593 &reset_context->flags);
4599 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4600 struct amdgpu_reset_context *reset_context)
4602 struct amdgpu_device *tmp_adev = NULL;
4603 bool need_full_reset, skip_hw_reset, vram_lost = false;
4606 /* Try reset handler method first */
4607 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4609 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4610 /* If reset handler not implemented, continue; otherwise return */
4616 /* Reset handler not implemented, use the default method */
4618 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4619 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4622 * ASIC reset has to be done on all XGMI hive nodes ASAP
4623 * to allow proper link negotiation in the FW (within 1 sec)
4625 if (!skip_hw_reset && need_full_reset) {
4626 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4627 /* For XGMI run all resets in parallel to speed up the process */
4628 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4629 tmp_adev->gmc.xgmi.pending_reset = false;
4630 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4633 r = amdgpu_asic_reset(tmp_adev);
4636 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4637 r, adev_to_drm(tmp_adev)->unique);
4642 /* For XGMI, wait for all resets to complete before proceeding */
4644 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4645 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4646 flush_work(&tmp_adev->xgmi_reset_work);
4647 r = tmp_adev->asic_reset_res;
4655 if (!r && amdgpu_ras_intr_triggered()) {
4656 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4657 if (tmp_adev->mmhub.ras_funcs &&
4658 tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4659 tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4662 amdgpu_ras_intr_cleared();
4665 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4666 if (need_full_reset) {
4668 r = amdgpu_device_asic_init(tmp_adev);
4670 dev_warn(tmp_adev->dev, "asic atom init failed!");
4672 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4673 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4677 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4681 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4683 DRM_INFO("VRAM is lost due to GPU reset!\n");
4684 amdgpu_inc_vram_lost(tmp_adev);
4687 r = amdgpu_device_fw_loading(tmp_adev);
4691 r = amdgpu_device_ip_resume_phase2(tmp_adev);
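/* Record the reset magic so a subsequent reset can detect whether VRAM was lost */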
4696 amdgpu_device_fill_reset_magic(tmp_adev);
4699 * Add this ASIC back as tracked now that the reset
4700 * has completed successfully.
4702 amdgpu_register_gpu_instance(tmp_adev);
4704 if (!reset_context->hive &&
4705 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4706 amdgpu_xgmi_add_device(tmp_adev);
4708 r = amdgpu_device_ip_late_init(tmp_adev);
4712 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4715 * The GPU enters a bad state once the number of
4716 * faulty pages flagged by ECC reaches the threshold,
4717 * and RAS recovery is scheduled next. Add a check
4718 * here to break recovery if the bad page threshold
4719 * has indeed been exceeded, and remind the user to
4720 * either retire this GPU or set a bigger
4721 * bad_page_threshold value to fix this when
4722 * probing the driver again.
4724 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4726 amdgpu_ras_resume(tmp_adev);
4732 /* Update PSP FW topology after reset */
4733 if (reset_context->hive &&
4734 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4735 r = amdgpu_xgmi_update_topology(
4736 reset_context->hive, tmp_adev);
4742 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4743 r = amdgpu_ib_ring_tests(tmp_adev);
4745 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4746 need_full_reset = true;
4753 r = amdgpu_device_recover_vram(tmp_adev);
4755 tmp_adev->asic_reset_res = r;
4759 if (need_full_reset)
4760 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4762 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4766 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4767 struct amdgpu_hive_info *hive)
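/* Claim the device atomically; only one reset path may own it at a time */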
4769 if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4773 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4775 down_write(&adev->reset_sem);
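/* Record the MP1 (SMU) state expected for the chosen reset method */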
4778 switch (amdgpu_asic_reset_method(adev)) {
4779 case AMD_RESET_METHOD_MODE1:
4780 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4782 case AMD_RESET_METHOD_MODE2:
4783 adev->mp1_state = PP_MP1_STATE_RESET;
4786 adev->mp1_state = PP_MP1_STATE_NONE;
4793 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4795 amdgpu_vf_error_trans_all(adev);
4796 adev->mp1_state = PP_MP1_STATE_NONE;
4797 atomic_set(&adev->in_gpu_reset, 0);
4798 up_write(&adev->reset_sem);
4802 * Lock a list of amdgpu devices in a hive safely; if not a hive
4803 * with multiple nodes, this behaves like amdgpu_device_lock_adev.
4805 * Unlocking won't require a rollback.
4807 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4809 struct amdgpu_device *tmp_adev = NULL;
4811 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
4813 dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4816 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4817 if (!amdgpu_device_lock_adev(tmp_adev, hive))
4820 } else if (!amdgpu_device_lock_adev(adev, hive))
4825 if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4827 * If the locking iteration breaks in the middle of a hive,
4828 * it may mean there is a race, or that a hive device
4829 * locked up independently.
4830 * We may or may not be in trouble, so roll back
4831 * the locks and print a warning.
4833 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4834 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4835 amdgpu_device_unlock_adev(tmp_adev);
4841 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4843 struct pci_dev *p = NULL;
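/* Function 1 of the GPU device is its HDMI/DP audio controller */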
4845 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4846 adev->pdev->bus->number, 1);
4848 pm_runtime_enable(&(p->dev));
4849 pm_runtime_resume(&(p->dev));
4853 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4855 enum amd_reset_method reset_method;
4856 struct pci_dev *p = NULL;
4860 * For now, only BACO and mode1 reset are confirmed
4861 * to suffer the audio issue if the audio device is not properly suspended.
4863 reset_method = amdgpu_asic_reset_method(adev);
4864 if ((reset_method != AMD_RESET_METHOD_BACO) &&
4865 (reset_method != AMD_RESET_METHOD_MODE1))
4868 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4869 adev->pdev->bus->number, 1);
4873 expires = pm_runtime_autosuspend_expiration(&(p->dev));
4876 * If we cannot get the audio device's autosuspend delay,
4877 * use a fixed 4s interval. Since 3s is the audio
4878 * controller's default autosuspend delay setting,
4879 * the 4s used here is guaranteed to cover it.
4881 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4883 while (!pm_runtime_status_suspended(&(p->dev))) {
4884 if (!pm_runtime_suspend(&(p->dev)))
4887 if (expires < ktime_get_mono_fast_ns()) {
4888 dev_warn(adev->dev, "failed to suspend display audio\n");
4889 /* TODO: abort the succeeding gpu reset? */
4894 pm_runtime_disable(&(p->dev));
4899 static void amdgpu_device_recheck_guilty_jobs(
4900 struct amdgpu_device *adev, struct list_head *device_list_handle,
4901 struct amdgpu_reset_context *reset_context)
4905 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4906 struct amdgpu_ring *ring = adev->rings[i];
4908 struct drm_sched_job *s_job;
4910 if (!ring || !ring->sched.thread)
4913 s_job = list_first_entry_or_null(&ring->sched.pending_list,
4914 struct drm_sched_job, list);
4918 /* Clear the job's guilty flag and rely on the following step to decide the real one */
4919 drm_sched_reset_karma(s_job);
4920 /* The real bad job will be resubmitted twice; take an extra dma_fence_get
4921 * here to make sure the fence refcount stays balanced */
4922 dma_fence_get(s_job->s_fence->parent);
4923 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
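/* Wait on the resubmitted job's HW fence; a timeout marks it as the real guilty job */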
4925 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4926 if (ret == 0) { /* timeout */
4927 DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4928 ring->sched.name, s_job->id);
4931 drm_sched_increase_karma(s_job);
4934 if (amdgpu_sriov_vf(adev)) {
4935 amdgpu_virt_fini_data_exchange(adev);
4936 r = amdgpu_device_reset_sriov(adev, false);
4938 adev->asic_reset_res = r;
4940 clear_bit(AMDGPU_SKIP_HW_RESET,
4941 &reset_context->flags);
4942 r = amdgpu_do_asic_reset(device_list_handle,
4944 if (r && r == -EAGAIN)
4949 * Bump the reset counter so that the subsequently
4950 * resubmitted jobs will flush their VMIDs.
4952 atomic_inc(&adev->gpu_reset_counter);
4956 /* got the hw fence, signal finished fence */
4957 atomic_dec(ring->sched.score);
4958 dma_fence_put(s_job->s_fence->parent);
4959 dma_fence_get(&s_job->s_fence->finished);
4960 dma_fence_signal(&s_job->s_fence->finished);
4961 dma_fence_put(&s_job->s_fence->finished);
4963 /* remove node from list and free the job */
4964 spin_lock(&ring->sched.job_list_lock);
4965 list_del_init(&s_job->list);
4966 spin_unlock(&ring->sched.job_list_lock);
4967 ring->sched.ops->free_job(s_job);
4972 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4974 * @adev: amdgpu_device pointer
4975 * @job: the job that triggered the hang, if any
4977 * Attempt to reset the GPU if it has hung (all ASICs).
4978 * Attempt a soft reset or full reset and reinitialize the ASIC.
4979 * Returns 0 for success or an error on failure.
4982 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4983 struct amdgpu_job *job)
4985 struct list_head device_list, *device_list_handle = NULL;
4986 bool job_signaled = false;
4987 struct amdgpu_hive_info *hive = NULL;
4988 struct amdgpu_device *tmp_adev = NULL;
4990 bool need_emergency_restart = false;
4991 bool audio_suspended = false;
4992 int tmp_vram_lost_counter;
4993 struct amdgpu_reset_context reset_context;
4995 memset(&reset_context, 0, sizeof(reset_context));
4998 * Special case: RAS triggered and full reset isn't supported
5000 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5003 * Flush RAM to disk so that after reboot
5004 * the user can read the log and see why the system rebooted.
5006 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5007 DRM_WARN("Emergency reboot.");
5010 emergency_restart();
5013 dev_info(adev->dev, "GPU %s begin!\n",
5014 need_emergency_restart ? "jobs stop":"reset");
5017 * Here we trylock to avoid a chain of resets executing, triggered
5018 * either by jobs on different adevs in the XGMI hive or by jobs on
5019 * different schedulers for the same device, while this TO handler is running.
5020 * We always reset all schedulers for a device and all devices in an XGMI
5021 * hive, so that should take care of them too.
5023 if (!amdgpu_sriov_vf(adev))
5024 hive = amdgpu_get_xgmi_hive(adev);
5026 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
5027 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
5028 job ? job->base.id : -1, hive->hive_id);
5029 amdgpu_put_xgmi_hive(hive);
5031 drm_sched_increase_karma(&job->base);
5034 mutex_lock(&hive->hive_lock);
5037 reset_context.method = AMD_RESET_METHOD_NONE;
5038 reset_context.reset_req_dev = adev;
5039 reset_context.job = job;
5040 reset_context.hive = hive;
5041 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5044 * Lock the device before we try to operate on the linked list;
5045 * if we don't get the device lock, don't touch the linked list since
5046 * others may be iterating over it.
5048 r = amdgpu_device_lock_hive_adev(adev, hive);
5050 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
5051 job ? job->base.id : -1);
5053 /* Even though we skipped this reset, we still need to mark the job as guilty */
5055 drm_sched_increase_karma(&job->base);
5060 * Build list of devices to reset.
5061 * In case we are in XGMI hive mode, re-sort the device list
5062 * to put adev in the first position.
5064 INIT_LIST_HEAD(&device_list);
5065 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5066 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5067 list_add_tail(&tmp_adev->reset_list, &device_list);
5068 if (!list_is_first(&adev->reset_list, &device_list))
5069 list_rotate_to_front(&adev->reset_list, &device_list);
5070 device_list_handle = &device_list;
5072 list_add_tail(&adev->reset_list, &device_list);
5073 device_list_handle = &device_list;
5076 /* block all schedulers and reset given job's ring */
5077 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5079 * Try to put the audio codec into a suspended state
5080 * before the GPU reset starts.
5082 * The power domain of the graphics device is shared
5083 * with the AZ power domain. Without this,
5084 * we may change the audio hardware behind
5085 * the audio driver's back, which triggers
5086 * audio codec errors.
5088 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5089 audio_suspended = true;
5091 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5093 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5095 if (!amdgpu_sriov_vf(tmp_adev))
5096 amdgpu_amdkfd_pre_reset(tmp_adev);
5099 * Mark the ASICs to be reset as untracked first,
5100 * and add them back after the reset completes.
5102 amdgpu_unregister_gpu_instance(tmp_adev);
5104 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5106 /* disable ras on ALL IPs */
5107 if (!need_emergency_restart &&
5108 amdgpu_device_ip_need_full_reset(tmp_adev))
5109 amdgpu_ras_suspend(tmp_adev);
5111 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5112 struct amdgpu_ring *ring = tmp_adev->rings[i];
5114 if (!ring || !ring->sched.thread)
5117 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5119 if (need_emergency_restart)
5120 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5122 atomic_inc(&tmp_adev->gpu_reset_counter);
5125 if (need_emergency_restart)
5126 goto skip_sched_resume;
5129 * Must check whether the guilty job has signaled here, since after this
5130 * point all old HW fences are force signaled.
5132 * job->base holds a reference to the parent fence.
5134 if (job && job->base.s_fence->parent &&
5135 dma_fence_is_signaled(job->base.s_fence->parent)) {
5136 job_signaled = true;
5137 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5141 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5142 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5143 r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5144 /* TODO: should we stop? */
5146 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
5147 r, adev_to_drm(tmp_adev)->unique);
5148 tmp_adev->asic_reset_res = r;
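/* Snapshot the VRAM-lost counter; used below to decide whether VRAM survived the reset */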
5152 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5153 /* Actual ASIC resets if needed.*/
5154 /* Host driver will handle XGMI hive reset for SRIOV */
5155 if (amdgpu_sriov_vf(adev)) {
5156 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5158 adev->asic_reset_res = r;
5160 r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
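/* -EAGAIN from the reset means the whole recovery sequence must be retried */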
5161 if (r && r == -EAGAIN)
5167 /* Post ASIC reset for all devs. */
5168 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5171 * Sometimes a later bad compute job can block a good gfx job, since the
5172 * gfx and compute rings share internal GC HW. We add an additional
5173 * guilty-job recheck step to find the real guilty job: it synchronously
5174 * resubmits the first job and waits for it to be signaled. If that times
5175 * out, we identify it as the real guilty job.
5177 if (amdgpu_gpu_recovery == 2 &&
5178 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5179 amdgpu_device_recheck_guilty_jobs(
5180 tmp_adev, device_list_handle, &reset_context);
5182 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5183 struct amdgpu_ring *ring = tmp_adev->rings[i];
5185 if (!ring || !ring->sched.thread)
5189 /* No point in resubmitting jobs if we didn't HW reset */
5189 if (!tmp_adev->asic_reset_res && !job_signaled)
5190 drm_sched_resubmit_jobs(&ring->sched);
5192 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5195 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5196 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5199 tmp_adev->asic_reset_res = 0;
5202 /* Bad news: how do we tell userspace about this? */
5203 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5204 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5206 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5207 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5208 DRM_WARN("smart shift update failed\n");
5213 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5214 /* unlock kfd: SRIOV would do it separately */
5215 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5216 amdgpu_amdkfd_post_reset(tmp_adev);
5218 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5219 * bring up kfd here if it was not initialized before
5221 if (!adev->kfd.init_complete)
5222 amdgpu_amdkfd_device_init(adev);
5224 if (audio_suspended)
5225 amdgpu_device_resume_display_audio(tmp_adev);
5226 amdgpu_device_unlock_adev(tmp_adev);
5231 atomic_set(&hive->in_reset, 0);
5232 mutex_unlock(&hive->hive_lock);
5233 amdgpu_put_xgmi_hive(hive);
5236 if (r && r != -EAGAIN)
5237 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5242 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5244 * @adev: amdgpu_device pointer
5246 * Fetches and stores in the driver the PCIE capabilities (gen speed
5247 * and lanes) of the slot the device is in. Handles APUs and
5248 * virtualized environments where PCIE config space may not be available.
5250 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5252 struct pci_dev *pdev;
5253 enum pci_bus_speed speed_cap, platform_speed_cap;
5254 enum pcie_link_width platform_link_width;
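/* Module parameters, when set, take precedence over the probed values below */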
5256 if (amdgpu_pcie_gen_cap)
5257 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5259 if (amdgpu_pcie_lane_cap)
5260 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5262 /* covers APUs as well */
5263 if (pci_is_root_bus(adev->pdev->bus)) {
5264 if (adev->pm.pcie_gen_mask == 0)
5265 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5266 if (adev->pm.pcie_mlw_mask == 0)
5267 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5271 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5274 pcie_bandwidth_available(adev->pdev, NULL,
5275 &platform_speed_cap, &platform_link_width);
5277 if (adev->pm.pcie_gen_mask == 0) {
5280 speed_cap = pcie_get_speed_cap(pdev);
5281 if (speed_cap == PCI_SPEED_UNKNOWN) {
5282 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5283 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5284 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5286 if (speed_cap == PCIE_SPEED_32_0GT)
5287 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5288 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5289 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5290 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5291 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5292 else if (speed_cap == PCIE_SPEED_16_0GT)
5293 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5294 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5295 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5296 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5297 else if (speed_cap == PCIE_SPEED_8_0GT)
5298 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5299 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5300 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5301 else if (speed_cap == PCIE_SPEED_5_0GT)
5302 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5303 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5305 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5308 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5309 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5310 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5312 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5313 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5314 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5315 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5316 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5317 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5318 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5319 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5320 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5321 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5322 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5323 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5324 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5325 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5326 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5327 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5328 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5329 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5331 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5335 if (adev->pm.pcie_mlw_mask == 0) {
5336 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5337 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5339 switch (platform_link_width) {
5341 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5342 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5343 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5344 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5345 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5346 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5347 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5350 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5351 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5352 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5353 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5354 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5355 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5358 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5359 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5360 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5361 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5362 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5365 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5366 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5367 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5368 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5371 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5372 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5373 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5376 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5377 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5380 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5389 int amdgpu_device_baco_enter(struct drm_device *dev)
5391 struct amdgpu_device *adev = drm_to_adev(dev);
5392 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
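/*
 * On RAS-enabled configurations, doorbell interrupts are quiesced below
 * across BACO entry and re-enabled on BACO exit.
 */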
5394 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5397 if (ras && adev->ras_enabled &&
5398 adev->nbio.funcs->enable_doorbell_interrupt)
5399 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5401 return amdgpu_dpm_baco_enter(adev);
5404 int amdgpu_device_baco_exit(struct drm_device *dev)
5406 struct amdgpu_device *adev = drm_to_adev(dev);
5407 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5410 if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5413 ret = amdgpu_dpm_baco_exit(adev);
5417 if (ras && adev->ras_enabled &&
5418 adev->nbio.funcs->enable_doorbell_interrupt)
5419 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5421 if (amdgpu_passthrough(adev) &&
5422 adev->nbio.funcs->clear_doorbell_interrupt)
5423 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5428 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5432 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5433 struct amdgpu_ring *ring = adev->rings[i];
5435 if (!ring || !ring->sched.thread)
5438 cancel_delayed_work_sync(&ring->sched.work_tdr);
5443 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5444 * @pdev: PCI device struct
5445 * @state: PCI channel state
5447 * Description: Called when a PCI error is detected.
5449 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5451 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5453 struct drm_device *dev = pci_get_drvdata(pdev);
5454 struct amdgpu_device *adev = drm_to_adev(dev);
5457 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5459 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5460 DRM_WARN("No support for XGMI hive yet...");
5461 return PCI_ERS_RESULT_DISCONNECT;
5464 adev->pci_channel_state = state;
5467 case pci_channel_io_normal:
5468 return PCI_ERS_RESULT_CAN_RECOVER;
5469 /* Fatal error, prepare for slot reset */
5470 case pci_channel_io_frozen:
5472 * Cancel and wait for all TDRs in progress if we fail to
5473 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5475 * Locking adev->reset_sem will prevent any external access
5476 * to the GPU during PCI error recovery.
5478 while (!amdgpu_device_lock_adev(adev, NULL))
5479 amdgpu_cancel_all_tdr(adev);
5482 * Block any work scheduling as we do for regular GPU reset
5483 * for the duration of the recovery
5485 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5486 struct amdgpu_ring *ring = adev->rings[i];
5488 if (!ring || !ring->sched.thread)
5491 drm_sched_stop(&ring->sched, NULL);
5493 atomic_inc(&adev->gpu_reset_counter);
5494 return PCI_ERS_RESULT_NEED_RESET;
5495 case pci_channel_io_perm_failure:
5496 /* Permanent error, prepare for device removal */
5497 return PCI_ERS_RESULT_DISCONNECT;
5500 return PCI_ERS_RESULT_NEED_RESET;
5504 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5505 * @pdev: pointer to PCI device
5507 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5510 DRM_INFO("PCI error: mmio enabled callback!!\n");
5512 /* TODO - dump whatever for debugging purposes */
5514 /* This is called only if amdgpu_pci_error_detected returns
5515 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5516 * work, so there is no need to reset the slot.
5519 return PCI_ERS_RESULT_RECOVERED;
5523 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5524 * @pdev: PCI device struct
5526 * Description: This routine is called by the pci error recovery
5527 * code after the PCI slot has been reset, just before we
5528 * should resume normal operations.
5530 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5532 struct drm_device *dev = pci_get_drvdata(pdev);
5533 struct amdgpu_device *adev = drm_to_adev(dev);
5535 struct amdgpu_reset_context reset_context;
5537 struct list_head device_list;
5539 DRM_INFO("PCI error: slot reset callback!!\n");
5541 memset(&reset_context, 0, sizeof(reset_context));
5543 INIT_LIST_HEAD(&device_list);
5544 list_add_tail(&adev->reset_list, &device_list);
5546 /* wait for asic to come out of reset */
5549 /* Restore PCI config space */
5550 amdgpu_device_load_pci_state(pdev);
5552 /* confirm ASIC came out of reset */
5553 for (i = 0; i < adev->usec_timeout; i++) {
5554 memsize = amdgpu_asic_get_config_memsize(adev);
5556 if (memsize != 0xffffffff)
5560 if (memsize == 0xffffffff) {
5565 reset_context.method = AMD_RESET_METHOD_NONE;
5566 reset_context.reset_req_dev = adev;
5567 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5568 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
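/* The slot was already reset by the PCI core, so skip the ASIC-level HW reset */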
5570 adev->no_hw_access = true;
5571 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5572 adev->no_hw_access = false;
5576 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5580 if (amdgpu_device_cache_pci_state(adev->pdev))
5581 pci_restore_state(adev->pdev);
5583 DRM_INFO("PCIe error recovery succeeded\n");
5585 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5586 amdgpu_device_unlock_adev(adev);
5589 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5593 * amdgpu_pci_resume() - resume normal ops after PCI reset
5594 * @pdev: pointer to PCI device
5596 * Called when the error recovery driver tells us that it's
5597 * OK to resume normal operation.
5599 void amdgpu_pci_resume(struct pci_dev *pdev)
5601 struct drm_device *dev = pci_get_drvdata(pdev);
5602 struct amdgpu_device *adev = drm_to_adev(dev);
5606 DRM_INFO("PCI error: resume callback!!\n");
5608 /* Only continue execution for the case of pci_channel_io_frozen */
5609 if (adev->pci_channel_state != pci_channel_io_frozen)
5612 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5613 struct amdgpu_ring *ring = adev->rings[i];
5615 if (!ring || !ring->sched.thread)
5619 drm_sched_resubmit_jobs(&ring->sched);
5620 drm_sched_start(&ring->sched, true);
5623 amdgpu_device_unlock_adev(adev);
5626 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5628 struct drm_device *dev = pci_get_drvdata(pdev);
5629 struct amdgpu_device *adev = drm_to_adev(dev);
5632 r = pci_save_state(pdev);
5634 kfree(adev->pci_state);
5636 adev->pci_state = pci_store_saved_state(pdev);
5638 if (!adev->pci_state) {
5639 DRM_ERROR("Failed to store PCI saved state");
5643 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5650 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5652 struct drm_device *dev = pci_get_drvdata(pdev);
5653 struct amdgpu_device *adev = drm_to_adev(dev);
5656 if (!adev->pci_state)
5659 r = pci_load_saved_state(pdev, adev->pci_state);
5662 pci_restore_state(pdev);
5664 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5671 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5672 struct amdgpu_ring *ring)
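/* APUs (on x86-64) and CPU-coherent XGMI parts need no explicit HDP flush */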
5674 #ifdef CONFIG_X86_64
5675 if (adev->flags & AMD_IS_APU)
5678 if (adev->gmc.xgmi.connected_to_cpu)
5681 if (ring && ring->funcs->emit_hdp_flush)
5682 amdgpu_ring_emit_hdp_flush(ring);
5684 amdgpu_asic_flush_hdp(adev, ring);
5687 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5688 struct amdgpu_ring *ring)
5690 #ifdef CONFIG_X86_64
5691 if (adev->flags & AMD_IS_APU)
5694 if (adev->gmc.xgmi.connected_to_cpu)
5697 amdgpu_asic_invalidate_hdp(adev, ring);
5701 * amdgpu_device_halt() - bring hardware to some kind of halt state
5703 * @adev: amdgpu_device pointer
5705 * Bring the hardware to some kind of halt state so that no one can touch it
5706 * any more. This helps to preserve the error context when an error occurs.
5707 * Compared to a simple hang, the system stays stable, at least for SSH
5708 * access, so it should be trivial to inspect the hardware state and
5709 * see what's going on. Implemented as follows:
5711 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5712 * clears all CPU mappings to the device, and disallows remappings through page faults
5713 * 2. amdgpu_irq_disable_all() disables all interrupts
5714 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5715 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5716 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5717 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5718 * flush any in-flight DMA operations
5720 void amdgpu_device_halt(struct amdgpu_device *adev)
5722 struct pci_dev *pdev = adev->pdev;
5723 struct drm_device *ddev = adev_to_drm(adev);
5725 drm_dev_unplug(ddev);
5727 amdgpu_irq_disable_all(adev);
5729 amdgpu_fence_driver_hw_fini(adev);
5731 adev->no_hw_access = true;
5733 amdgpu_device_unmap_mmio(adev);
5735 pci_disable_device(pdev);
5736 pci_wait_for_pending_transaction(pdev);