/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>

#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"

#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif

#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");

#define AMDGPU_RESUME_MS	2000
const char *amdgpu_asic_name[] = {

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
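/*
 * Usage note (illustrative, not part of the original file): with the GPU
 * exposed as card0, the accumulated replay count can be read from
 * userspace via /sys/class/drm/card0/device/pcie_replay_count; the exact
 * card index depends on the system.
 */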
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * of the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * of the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * of the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */
static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}
/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes, the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0;
	uint64_t last;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
		size_t count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			amdgpu_asic_flush_hdp(adev, NULL);
		} else {
			amdgpu_asic_invalidate_hdp(adev, NULL);
			memcpy_fromio(buf, addr, count);
		}
	}

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		uint32_t tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *buf++);
		else
			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
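/*
 * Usage sketch (illustrative, not from the original file): reading the
 * first four dwords of VRAM into a stack buffer:
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */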
/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->in_pci_err_recovery)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (down_read_trylock(&adev->reset_sem))
		up_read(&adev->reset_sem);
	else
		lockdep_assert_held(&adev->reset_sem);
#endif
	return false;
}
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}
/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return readb(adev->rmmio + offset);
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
}
/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
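/*
 * Usage sketch (illustrative; mmFOO is a placeholder register, not from
 * this file): a typical read-modify-write through these helpers, which
 * most callers reach via the RREG32()/WREG32() wrappers:
 *
 *	u32 tmp = amdgpu_device_rreg(adev, mmFOO, 0);
 *
 *	tmp |= 0x1;
 *	amdgpu_device_wreg(adev, mmFOO, tmp, 0);
 */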
/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
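/*
 * Usage sketch (illustrative, assuming a struct amdgpu_ring *ring that has
 * already been assigned a doorbell): ring code typically publishes its
 * write pointer through these helpers, e.g.
 *
 *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *			    lower_32_bits(ring->wptr));
 *
 * on pre-VEGA10 parts, or amdgpu_mm_wdoorbell64() with the full 64 bit
 * wptr on VEGA10 and later.
 */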
/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
/**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
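/*
 * Wiring sketch (illustrative; soc_pcie_rreg, PCIE_INDEX and PCIE_DATA are
 * placeholder names, not from this file): asic code typically installs thin
 * wrappers around these helpers as the device's pcie_rreg/pcie_wreg
 * callbacks:
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, PCIE_INDEX,
 *						   PCIE_DATA, reg);
 *	}
 */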
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy block reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy block reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
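/*
 * Layout sketch (illustrative; mmREG_A, mmREG_B and the values are
 * placeholders, not from this file): golden settings are flat arrays of
 * (offset, and_mask, or_mask) triples:
 *
 *	static const u32 golden_settings[] = {
 *		mmREG_A, 0xffffffff, 0x00000100,	(full overwrite)
 *		mmREG_B, 0x0000ff00, 0x00001200,	(read-modify-write)
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings,
 *						ARRAY_SIZE(golden_settings));
 */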
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}
/*
 * GPU doorbell aperture helpers function.
 */

/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment + 1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * the max num_doorbells should add 1 page (0x400 dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
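/*
 * Usage sketch (illustrative, error handling trimmed): a caller grabs a
 * slot, derives the CPU and GPU addresses, and releases it on teardown:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		volatile uint32_t *cpu_addr = &adev->wb.wb[wb];
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */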
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
/*
 * GPU helpers function.
 */

/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if posting is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
		 * hangs. smc fw versions above 22.15 don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}
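/*
 * Example (illustrative): booting with amdgpu.sched_jobs=6 trips the
 * power-of-two check above, so the value is rounded up with
 * roundup_pow_of_two() and the driver proceeds with sched_jobs=8.
 */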
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block version is equal or greater than the
 * requested version, 1 if it is smaller or the ip_block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
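/*
 * Usage sketch (illustrative; apply_gfx_workaround() is a hypothetical
 * helper, not part of this file): gate code on a minimum IP version, e.g.
 * "GFX v8.1 or newer":
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1))
 *		apply_gfx_workaround(adev);
 */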
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res)
					adev->mode_info.num_crtc = num_crtc;
				else
					adev->mode_info.num_crtc = 1;
				break;
			}
		}
		kfree(pciaddstr);

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);
	}
}
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		amdgpu_discovery_get_gfx_info(adev);

		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#endif
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_ALDEBARAN:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_VANGOGH:
		chip_name = "vangogh";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
					le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in discovery table,
		 * we always need to parse it from gpu info firmware if needed.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
					le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_ALDEBARAN:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		if (adev->asic_type == CHIP_VANGOGH)
			adev->family = AMDGPU_FAMILY_VGH;
		else
			adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	amdgpu_amdkfd_device_probe(adev);

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else if (adev->ip_blocks[i].version->funcs->early_init) {
			r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
			if (r == -ENOENT) {
				adev->ip_blocks[i].status.valid = false;
			} else if (r) {
				DRM_ERROR("early_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		} else {
			adev->ip_blocks[i].status.valid = true;
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (r)
				return r;

			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (!adev->ip_blocks[i].status.sw)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (amdgpu_in_reset(adev) || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * retired pages will be loaded from eeprom and reserved here,
	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
	 * about the failure coming from a bad gpu situation and stops the
	 * amdgpu init process accordingly. For other failed cases it will
	 * still release all the resources and print an error message, rather
	 * than returning a negative value to the upper level.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect the retired pages from being abused.
	 */
	r = amdgpu_ras_recovery_init(adev);
	if (r)
		goto init_failed;

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);

	/* Don't init kfd if whole hive need to be reset during init */
	if (!adev->gmc.xgmi.pending_reset)
		amdgpu_amdkfd_device_init(adev);

	amdgpu_fru_get_product_info(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}
2306 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2308 * @adev: amdgpu_device pointer
2310 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2311 * this function before a GPU reset. If the value is retained after a
2312 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2314 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2316 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
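/*
 * A minimal sketch of how the reset-magic helpers pair up; the real
 * call sites are in amdgpu_do_asic_reset() further down in this file:
 *
 *   amdgpu_device_fill_reset_magic(adev);     // arm the marker in VRAM
 *   ... GPU reset and resume phase1 ...
 *   if (amdgpu_device_check_vram_lost(adev))  // compare after the reset
 *           DRM_INFO("VRAM is lost due to GPU reset!\n");
 */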
2320 * amdgpu_device_check_vram_lost - check if vram is valid
2322 * @adev: amdgpu_device pointer
2324 * Checks the reset magic value written to the gart pointer in VRAM.
2325 * The driver calls this after a GPU reset to see if the contents of
2326 * VRAM have been lost or not.
2327 * Returns true if vram is lost, false if not.
2329 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2331 if (memcmp(adev->gart.ptr, adev->reset_magic,
2332 AMDGPU_RESET_MAGIC_NUM))
2335 if (!amdgpu_in_reset(adev))
2339 * For all ASICs with baco/mode1 reset, the VRAM is
2340 * always assumed to be lost.
2342 switch (amdgpu_asic_reset_method(adev)) {
2343 case AMD_RESET_METHOD_BACO:
2344 case AMD_RESET_METHOD_MODE1:
2352 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2354 * @adev: amdgpu_device pointer
2355 * @state: clockgating state (gate or ungate)
2357 * The list of all the hardware IPs that make up the asic is walked and the
2358 * set_clockgating_state callbacks are run.
2359 * During late init this pass enables clockgating for hardware IPs;
2360 * during fini or suspend it disables clockgating.
2361 * Returns 0 on success, negative error code on failure.
2364 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2365 enum amd_clockgating_state state)
2369 if (amdgpu_emu_mode == 1)
2372 for (j = 0; j < adev->num_ip_blocks; j++) {
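		/*
		 * Walk the blocks in forward order when gating and in
		 * reverse order when ungating, mirroring init/fini order.
		 */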
2373 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2374 if (!adev->ip_blocks[i].status.late_initialized)
2376 /* skip CG for VCE/UVD, it's handled specially */
2377 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2378 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2379 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2380 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2381 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2382 /* enable clockgating to save power */
2383 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2386 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2387 adev->ip_blocks[i].version->funcs->name, r);
2396 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2400 if (amdgpu_emu_mode == 1)
2403 for (j = 0; j < adev->num_ip_blocks; j++) {
2404 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2405 if (!adev->ip_blocks[i].status.late_initialized)
2407 /* skip PG for VCE/UVD, it's handled specially */
2408 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2409 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2410 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2411 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2412 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2413 /* enable powergating to save power */
2414 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2417 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2418 adev->ip_blocks[i].version->funcs->name, r);
2426 static int amdgpu_device_enable_mgpu_fan_boost(void)
2428 struct amdgpu_gpu_instance *gpu_ins;
2429 struct amdgpu_device *adev;
2432 mutex_lock(&mgpu_info.mutex);
2435 * MGPU fan boost feature should be enabled
2436 * only when there are two or more dGPUs in the system.
2439 if (mgpu_info.num_dgpu < 2)
2442 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2443 gpu_ins = &(mgpu_info.gpu_ins[i]);
2444 adev = gpu_ins->adev;
2445 if (!(adev->flags & AMD_IS_APU) &&
2446 !gpu_ins->mgpu_fan_enabled) {
2447 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2451 gpu_ins->mgpu_fan_enabled = 1;
2456 mutex_unlock(&mgpu_info.mutex);
2462 * amdgpu_device_ip_late_init - run late init for hardware IPs
2464 * @adev: amdgpu_device pointer
2466 * Late initialization pass for hardware IPs. The list of all the hardware
2467 * IPs that make up the asic is walked and the late_init callbacks are run.
2468 * late_init covers any special initialization that an IP requires
2469 * after all of the IPs have been initialized or something that needs to happen
2470 * late in the init process.
2471 * Returns 0 on success, negative error code on failure.
2473 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2475 struct amdgpu_gpu_instance *gpu_instance;
2478 for (i = 0; i < adev->num_ip_blocks; i++) {
2479 if (!adev->ip_blocks[i].status.hw)
2481 if (adev->ip_blocks[i].version->funcs->late_init) {
2482 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2484 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2485 adev->ip_blocks[i].version->funcs->name, r);
2489 adev->ip_blocks[i].status.late_initialized = true;
2492 amdgpu_ras_set_error_query_ready(adev, true);
2494 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2495 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2497 amdgpu_device_fill_reset_magic(adev);
2499 r = amdgpu_device_enable_mgpu_fan_boost();
2501 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2503 /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2504 if (adev->asic_type == CHIP_ARCTURUS &&
2505 amdgpu_passthrough(adev) &&
2506 adev->gmc.xgmi.num_physical_nodes > 1)
2507 smu_set_light_sbr(&adev->smu, true);
2509 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2510 mutex_lock(&mgpu_info.mutex);
2513 * Reset device p-state to low as this was booted with high.
2515 * This should be performed only after all devices from the same
2516 * hive get initialized.
2518 * However, the number of devices in a hive is not known in advance;
2519 * it is counted one by one during device initialization.
2521 * So, we wait until all XGMI interlinked devices are initialized.
2522 * This may bring some delays as those devices may come from
2523 * different hives. But that should be OK.
2525 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2526 for (i = 0; i < mgpu_info.num_gpu; i++) {
2527 gpu_instance = &(mgpu_info.gpu_ins[i]);
2528 if (gpu_instance->adev->flags & AMD_IS_APU)
2531 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2532 AMDGPU_XGMI_PSTATE_MIN);
2534 DRM_ERROR("pstate setting failed (%d).\n", r);
2540 mutex_unlock(&mgpu_info.mutex);
2547 * amdgpu_device_ip_fini - run fini for hardware IPs
2549 * @adev: amdgpu_device pointer
2551 * Main teardown pass for hardware IPs. The list of all the hardware
2552 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2553 * are run. hw_fini tears down the hardware associated with each IP
2554 * and sw_fini tears down any software state associated with each IP.
2555 * Returns 0 on success, negative error code on failure.
2557 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2561 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2562 amdgpu_virt_release_ras_err_handler_data(adev);
2564 amdgpu_ras_pre_fini(adev);
2566 if (adev->gmc.xgmi.num_physical_nodes > 1)
2567 amdgpu_xgmi_remove_device(adev);
2569 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2570 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2572 amdgpu_amdkfd_device_fini(adev);
2574 /* need to disable SMC first */
2575 for (i = 0; i < adev->num_ip_blocks; i++) {
2576 if (!adev->ip_blocks[i].status.hw)
2578 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2579 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2580 /* XXX handle errors */
2582 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2583 adev->ip_blocks[i].version->funcs->name, r);
2585 adev->ip_blocks[i].status.hw = false;
2590 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2591 if (!adev->ip_blocks[i].status.hw)
2594 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2595 /* XXX handle errors */
2597 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2598 adev->ip_blocks[i].version->funcs->name, r);
2601 adev->ip_blocks[i].status.hw = false;
2605 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2606 if (!adev->ip_blocks[i].status.sw)
2609 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2610 amdgpu_ucode_free_bo(adev);
2611 amdgpu_free_static_csa(&adev->virt.csa_obj);
2612 amdgpu_device_wb_fini(adev);
2613 amdgpu_device_vram_scratch_fini(adev);
2614 amdgpu_ib_pool_fini(adev);
2617 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2618 /* XXX handle errors */
2620 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2621 adev->ip_blocks[i].version->funcs->name, r);
2623 adev->ip_blocks[i].status.sw = false;
2624 adev->ip_blocks[i].status.valid = false;
2627 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2628 if (!adev->ip_blocks[i].status.late_initialized)
2630 if (adev->ip_blocks[i].version->funcs->late_fini)
2631 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2632 adev->ip_blocks[i].status.late_initialized = false;
2635 amdgpu_ras_fini(adev);
2637 if (amdgpu_sriov_vf(adev))
2638 if (amdgpu_virt_release_full_gpu(adev, false))
2639 DRM_ERROR("failed to release exclusive mode on fini\n");
2645 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2647 * @work: work_struct.
2649 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2651 struct amdgpu_device *adev =
2652 container_of(work, struct amdgpu_device, delayed_init_work.work);
2655 r = amdgpu_ib_ring_tests(adev);
2657 DRM_ERROR("ib ring test failed (%d).\n", r);
2660 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2662 struct amdgpu_device *adev =
2663 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2665 mutex_lock(&adev->gfx.gfx_off_mutex);
2666 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2667 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2668 adev->gfx.gfx_off_state = true;
2670 mutex_unlock(&adev->gfx.gfx_off_mutex);
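/*
 * Note: GFXOFF is only actually entered once gfx_off_req_count drops to
 * zero (it is initialized to 1 in amdgpu_device_init() below) and no
 * client has re-requested the GFX block before this delayed work runs.
 */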
2674 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2676 * @adev: amdgpu_device pointer
2678 * Main suspend function for hardware IPs. The list of all the hardware
2679 * IPs that make up the asic is walked, clockgating is disabled and the
2680 * suspend callbacks are run. suspend puts the hardware and software state
2681 * in each IP into a state suitable for suspend.
2682 * Returns 0 on success, negative error code on failure.
2684 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2688 if (adev->in_poweroff_reboot_com ||
2689 !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
2690 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2691 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2694 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2695 if (!adev->ip_blocks[i].status.valid)
2698 /* displays are handled separately */
2699 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2702 /* XXX handle errors */
2703 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2704 /* XXX handle errors */
2706 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2707 adev->ip_blocks[i].version->funcs->name, r);
2711 adev->ip_blocks[i].status.hw = false;
2718 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2720 * @adev: amdgpu_device pointer
2722 * Main suspend function for hardware IPs. The list of all the hardware
2723 * IPs that make up the asic is walked, clockgating is disabled and the
2724 * suspend callbacks are run. suspend puts the hardware and software state
2725 * in each IP into a state suitable for suspend.
2726 * Returns 0 on success, negative error code on failure.
2728 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2732 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2733 if (!adev->ip_blocks[i].status.valid)
2735 /* displays are handled in phase1 */
2736 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2738 /* PSP lost connection when err_event_athub occurs */
2739 if (amdgpu_ras_intr_triggered() &&
2740 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2741 adev->ip_blocks[i].status.hw = false;
2745 /* skip unnecessary suspend if we have not initialized them yet */
2746 if (adev->gmc.xgmi.pending_reset &&
2747 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2748 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2749 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2750 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2751 adev->ip_blocks[i].status.hw = false;
2754 /* XXX handle errors */
2755 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2756 /* XXX handle errors */
2758 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2759 adev->ip_blocks[i].version->funcs->name, r);
2761 adev->ip_blocks[i].status.hw = false;
2762 /* handle putting the SMC in the appropriate state */
2763 if (!amdgpu_sriov_vf(adev)) {
2764 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2765 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2767 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2768 adev->mp1_state, r);
2779 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2781 * @adev: amdgpu_device pointer
2783 * Main suspend function for hardware IPs. The list of all the hardware
2784 * IPs that make up the asic is walked, clockgating is disabled and the
2785 * suspend callbacks are run. suspend puts the hardware and software state
2786 * in each IP into a state suitable for suspend.
2787 * Returns 0 on success, negative error code on failure.
2789 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2793 if (amdgpu_sriov_vf(adev)) {
2794 amdgpu_virt_fini_data_exchange(adev);
2795 amdgpu_virt_request_full_gpu(adev, false);
2798 r = amdgpu_device_ip_suspend_phase1(adev);
2801 r = amdgpu_device_ip_suspend_phase2(adev);
2803 if (amdgpu_sriov_vf(adev))
2804 amdgpu_virt_release_full_gpu(adev, false);
2809 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2813 static enum amd_ip_block_type ip_order[] = {
2814 AMD_IP_BLOCK_TYPE_GMC,
2815 AMD_IP_BLOCK_TYPE_COMMON,
2816 AMD_IP_BLOCK_TYPE_PSP,
2817 AMD_IP_BLOCK_TYPE_IH,
2820 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2822 struct amdgpu_ip_block *block;
2824 block = &adev->ip_blocks[i];
2825 block->status.hw = false;
2827 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2829 if (block->version->type != ip_order[j] ||
2830 !block->status.valid)
2833 r = block->version->funcs->hw_init(adev);
2834 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2837 block->status.hw = true;
2844 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2848 static enum amd_ip_block_type ip_order[] = {
2849 AMD_IP_BLOCK_TYPE_SMC,
2850 AMD_IP_BLOCK_TYPE_DCE,
2851 AMD_IP_BLOCK_TYPE_GFX,
2852 AMD_IP_BLOCK_TYPE_SDMA,
2853 AMD_IP_BLOCK_TYPE_UVD,
2854 AMD_IP_BLOCK_TYPE_VCE,
2855 AMD_IP_BLOCK_TYPE_VCN
2858 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2860 struct amdgpu_ip_block *block;
2862 for (j = 0; j < adev->num_ip_blocks; j++) {
2863 block = &adev->ip_blocks[j];
2865 if (block->version->type != ip_order[i] ||
2866 !block->status.valid ||
2870 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2871 r = block->version->funcs->resume(adev);
2873 r = block->version->funcs->hw_init(adev);
2875 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2878 block->status.hw = true;
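/*
 * Together, the two SR-IOV reinit helpers above restore IP blocks in a
 * fixed order after a VF FLR: the early pass brings up GMC/COMMON/PSP/IH
 * so firmware loading can proceed, and the late pass then resumes the
 * remaining engines (SMC, DCE, GFX, SDMA, UVD/VCE/VCN).
 */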
2886 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2888 * @adev: amdgpu_device pointer
2890 * First resume function for hardware IPs. The list of all the hardware
2891 * IPs that make up the asic is walked and the resume callbacks are run for
2892 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2893 * after a suspend and updates the software state as necessary. This
2894 * function is also used for restoring the GPU after a GPU reset.
2895 * Returns 0 on success, negative error code on failure.
2897 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2901 for (i = 0; i < adev->num_ip_blocks; i++) {
2902 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2904 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2905 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2906 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2908 r = adev->ip_blocks[i].version->funcs->resume(adev);
2910 DRM_ERROR("resume of IP block <%s> failed %d\n",
2911 adev->ip_blocks[i].version->funcs->name, r);
2914 adev->ip_blocks[i].status.hw = true;
2922 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2924 * @adev: amdgpu_device pointer
2926 * Second resume function for hardware IPs. The list of all the hardware
2927 * IPs that make up the asic is walked and the resume callbacks are run for
2928 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2929 * functional state after a suspend and updates the software state as
2930 * necessary. This function is also used for restoring the GPU after a GPU reset.
2932 * Returns 0 on success, negative error code on failure.
2934 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2938 for (i = 0; i < adev->num_ip_blocks; i++) {
2939 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2941 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2942 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2943 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2944 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2946 r = adev->ip_blocks[i].version->funcs->resume(adev);
2948 DRM_ERROR("resume of IP block <%s> failed %d\n",
2949 adev->ip_blocks[i].version->funcs->name, r);
2952 adev->ip_blocks[i].status.hw = true;
2959 * amdgpu_device_ip_resume - run resume for hardware IPs
2961 * @adev: amdgpu_device pointer
2963 * Main resume function for hardware IPs. The hardware IPs
2964 * are split into two resume functions because they are
2965 * also used in recovering from a GPU reset, and some additional
2966 * steps need to be taken between them. In this case (S3/S4) they are run sequentially.
2968 * Returns 0 on success, negative error code on failure.
2970 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2974 r = amdgpu_device_ip_resume_phase1(adev);
2978 r = amdgpu_device_fw_loading(adev);
2982 r = amdgpu_device_ip_resume_phase2(adev);
2988 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2990 * @adev: amdgpu_device pointer
2992 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2994 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2996 if (amdgpu_sriov_vf(adev)) {
2997 if (adev->is_atom_fw) {
2998 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2999 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3001 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3002 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3005 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3006 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3011 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3013 * @asic_type: AMD asic type
3015 * Check if there is DC (new modesetting infrastructure) support for an asic.
3016 * Returns true if DC has support, false if not.
3018 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3020 switch (asic_type) {
3021 #if defined(CONFIG_DRM_AMD_DC)
3022 #if defined(CONFIG_DRM_AMD_DC_SI)
3033 * We have systems in the wild with these ASICs that require
3034 * LVDS and VGA support which is not supported with DC.
3036 * Fallback to the non-DC driver here by default so as not to
3037 * cause regressions.
3039 return amdgpu_dc > 0;
3043 case CHIP_POLARIS10:
3044 case CHIP_POLARIS11:
3045 case CHIP_POLARIS12:
3052 #if defined(CONFIG_DRM_AMD_DC_DCN)
3058 case CHIP_SIENNA_CICHLID:
3059 case CHIP_NAVY_FLOUNDER:
3060 case CHIP_DIMGREY_CAVEFISH:
3063 return amdgpu_dc != 0;
3067 DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3068 "but isn't supported by ASIC, ignoring\n");
3074 * amdgpu_device_has_dc_support - check if dc is supported
3076 * @adev: amdgpu_device pointer
3078 * Returns true for supported, false for not supported
3080 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3082 if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3085 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3089 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3091 struct amdgpu_device *adev =
3092 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3093 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3095 /* It's a bug to not have a hive within this function */
3100 * Use task barrier to synchronize all xgmi reset works across the
3101 * hive. task_barrier_enter and task_barrier_exit will block
3102 * until all the threads running the xgmi reset works reach
3103 * those points. task_barrier_full will do both blocks.
3105 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3107 task_barrier_enter(&hive->tb);
3108 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3110 if (adev->asic_reset_res)
3113 task_barrier_exit(&hive->tb);
3114 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3116 if (adev->asic_reset_res)
3119 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3120 adev->mmhub.funcs->reset_ras_error_count(adev);
3123 task_barrier_full(&hive->tb);
3124 adev->asic_reset_res = amdgpu_asic_reset(adev);
3128 if (adev->asic_reset_res)
3129 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3130 adev->asic_reset_res, adev_to_drm(adev)->unique);
3131 amdgpu_put_xgmi_hive(hive);
3134 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3136 char *input = amdgpu_lockup_timeout;
3137 char *timeout_setting = NULL;
3143 * By default, the timeout for non-compute jobs is 10000 ms,
3144 * and there is no timeout enforced on compute jobs.
3145 * In SR-IOV or passthrough mode, the timeout for compute
3146 * jobs is 60000 ms by default.
3148 adev->gfx_timeout = msecs_to_jiffies(10000);
3149 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3150 if (amdgpu_sriov_vf(adev))
3151 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3152 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3153 else if (amdgpu_passthrough(adev))
3154 adev->compute_timeout = msecs_to_jiffies(60000);
3156 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
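	/*
	 * The lockup_timeout module parameter takes up to four
	 * comma-separated values applied, in order, to the gfx, compute,
	 * sdma and video queues, e.g. (values illustrative):
	 *
	 *   modprobe amdgpu lockup_timeout=10000,60000,10000,10000
	 */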
3158 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3159 while ((timeout_setting = strsep(&input, ",")) &&
3160 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3161 ret = kstrtol(timeout_setting, 0, &timeout);
3168 } else if (timeout < 0) {
3169 timeout = MAX_SCHEDULE_TIMEOUT;
3171 timeout = msecs_to_jiffies(timeout);
3176 adev->gfx_timeout = timeout;
3179 adev->compute_timeout = timeout;
3182 adev->sdma_timeout = timeout;
3185 adev->video_timeout = timeout;
3192 * There is only one value specified and
3193 * it should apply to all non-compute jobs.
3196 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3197 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3198 adev->compute_timeout = adev->gfx_timeout;
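/*
 * These attributes surface under the PCI device's sysfs directory,
 * e.g. (path illustrative) /sys/class/drm/card0/device/serial_number.
 */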
3205 static const struct attribute *amdgpu_dev_attributes[] = {
3206 &dev_attr_product_name.attr,
3207 &dev_attr_product_number.attr,
3208 &dev_attr_serial_number.attr,
3209 &dev_attr_pcie_replay_count.attr,
3215 * amdgpu_device_init - initialize the driver
3217 * @adev: amdgpu_device pointer
3218 * @flags: driver flags
3220 * Initializes the driver info and hw (all asics).
3221 * Returns 0 for success or an error on failure.
3222 * Called at driver startup.
3224 int amdgpu_device_init(struct amdgpu_device *adev,
3227 struct drm_device *ddev = adev_to_drm(adev);
3228 struct pci_dev *pdev = adev->pdev;
3233 adev->shutdown = false;
3234 adev->flags = flags;
3236 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3237 adev->asic_type = amdgpu_force_asic_type;
3239 adev->asic_type = flags & AMD_ASIC_MASK;
3241 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3242 if (amdgpu_emu_mode == 1)
3243 adev->usec_timeout *= 10;
3244 adev->gmc.gart_size = 512 * 1024 * 1024;
3245 adev->accel_working = false;
3246 adev->num_rings = 0;
3247 adev->mman.buffer_funcs = NULL;
3248 adev->mman.buffer_funcs_ring = NULL;
3249 adev->vm_manager.vm_pte_funcs = NULL;
3250 adev->vm_manager.vm_pte_num_scheds = 0;
3251 adev->gmc.gmc_funcs = NULL;
3252 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3253 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3255 adev->smc_rreg = &amdgpu_invalid_rreg;
3256 adev->smc_wreg = &amdgpu_invalid_wreg;
3257 adev->pcie_rreg = &amdgpu_invalid_rreg;
3258 adev->pcie_wreg = &amdgpu_invalid_wreg;
3259 adev->pciep_rreg = &amdgpu_invalid_rreg;
3260 adev->pciep_wreg = &amdgpu_invalid_wreg;
3261 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3262 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3263 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3264 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3265 adev->didt_rreg = &amdgpu_invalid_rreg;
3266 adev->didt_wreg = &amdgpu_invalid_wreg;
3267 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3268 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3269 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3270 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3272 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3273 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3274 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3276 /* mutex initialization is all done here so we
3277 * can recall functions without having locking issues */
3278 mutex_init(&adev->firmware.mutex);
3279 mutex_init(&adev->pm.mutex);
3280 mutex_init(&adev->gfx.gpu_clock_mutex);
3281 mutex_init(&adev->srbm_mutex);
3282 mutex_init(&adev->gfx.pipe_reserve_mutex);
3283 mutex_init(&adev->gfx.gfx_off_mutex);
3284 mutex_init(&adev->grbm_idx_mutex);
3285 mutex_init(&adev->mn_lock);
3286 mutex_init(&adev->virt.vf_errors.lock);
3287 hash_init(adev->mn_hash);
3288 atomic_set(&adev->in_gpu_reset, 0);
3289 init_rwsem(&adev->reset_sem);
3290 mutex_init(&adev->psp.mutex);
3291 mutex_init(&adev->notifier_lock);
3293 r = amdgpu_device_check_arguments(adev);
3297 spin_lock_init(&adev->mmio_idx_lock);
3298 spin_lock_init(&adev->smc_idx_lock);
3299 spin_lock_init(&adev->pcie_idx_lock);
3300 spin_lock_init(&adev->uvd_ctx_idx_lock);
3301 spin_lock_init(&adev->didt_idx_lock);
3302 spin_lock_init(&adev->gc_cac_idx_lock);
3303 spin_lock_init(&adev->se_cac_idx_lock);
3304 spin_lock_init(&adev->audio_endpt_idx_lock);
3305 spin_lock_init(&adev->mm_stats.lock);
3307 INIT_LIST_HEAD(&adev->shadow_list);
3308 mutex_init(&adev->shadow_list_lock);
3310 INIT_LIST_HEAD(&adev->reset_list);
3312 INIT_DELAYED_WORK(&adev->delayed_init_work,
3313 amdgpu_device_delayed_init_work_handler);
3314 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3315 amdgpu_device_delay_enable_gfx_off);
3317 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3319 adev->gfx.gfx_off_req_count = 1;
3320 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3322 atomic_set(&adev->throttling_logging_enabled, 1);
3324 * If throttling continues, logging will be performed every minute
3325 * to avoid log flooding. "-1" is subtracted since the thermal
3326 * throttling interrupt comes every second. Thus, the total logging
3327 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3328 * for the throttling interrupt) = 60 seconds.
3330 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3331 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3333 /* Registers mapping */
3334 /* TODO: block userspace mapping of io register */
3335 if (adev->asic_type >= CHIP_BONAIRE) {
3336 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3337 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3339 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3340 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3343 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3344 if (adev->rmmio == NULL) {
3347 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3348 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3350 /* enable PCIE atomic ops */
3351 r = pci_enable_atomic_ops_to_root(adev->pdev,
3352 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3353 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3355 adev->have_atomics_support = false;
3356 DRM_INFO("PCIE atomic ops are not supported\n");
3358 adev->have_atomics_support = true;
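	/*
	 * Consumers (e.g. amdkfd user-mode queues on some ASICs) check
	 * adev->have_atomics_support before relying on PCIe atomics.
	 */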
3361 amdgpu_device_get_pcie_info(adev);
3364 DRM_INFO("MCBP is enabled\n");
3366 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3367 adev->enable_mes = true;
3369 /* detect hw virtualization here */
3370 amdgpu_detect_virtualization(adev);
3372 r = amdgpu_device_get_job_timeout_settings(adev);
3374 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3378 /* early init functions */
3379 r = amdgpu_device_ip_early_init(adev);
3383 /* doorbell bar mapping and doorbell index init*/
3384 amdgpu_device_doorbell_init(adev);
3386 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3387 /* this will fail for cards that aren't VGA class devices, just ignore it */
3389 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3390 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3392 if (amdgpu_device_supports_px(ddev)) {
3394 vga_switcheroo_register_client(adev->pdev,
3395 &amdgpu_switcheroo_ops, px);
3396 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3399 if (amdgpu_emu_mode == 1) {
3400 /* post the asic on emulation mode */
3401 emu_soc_asic_init(adev);
3402 goto fence_driver_init;
3405 /* detect if we are with an SRIOV vbios */
3406 amdgpu_device_detect_sriov_bios(adev);
3408 /* check if we need to reset the asic
3409 * E.g., driver was not cleanly unloaded previously, etc.
3411 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3412 if (adev->gmc.xgmi.num_physical_nodes) {
3413 dev_info(adev->dev, "Pending hive reset.\n");
3414 adev->gmc.xgmi.pending_reset = true;
3415 /* Only need to init necessary block for SMU to handle the reset */
3416 for (i = 0; i < adev->num_ip_blocks; i++) {
3417 if (!adev->ip_blocks[i].status.valid)
3419 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3420 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3421 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3422 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3423 DRM_DEBUG("IP %s disabled for hw_init.\n",
3424 adev->ip_blocks[i].version->funcs->name);
3425 adev->ip_blocks[i].status.hw = true;
3429 r = amdgpu_asic_reset(adev);
3431 dev_err(adev->dev, "asic reset on init failed\n");
3437 pci_enable_pcie_error_reporting(adev->pdev);
3439 /* Post card if necessary */
3440 if (amdgpu_device_need_post(adev)) {
3442 dev_err(adev->dev, "no vBIOS found\n");
3446 DRM_INFO("GPU posting now...\n");
3447 r = amdgpu_device_asic_init(adev);
3449 dev_err(adev->dev, "gpu post error!\n");
3454 if (adev->is_atom_fw) {
3455 /* Initialize clocks */
3456 r = amdgpu_atomfirmware_get_clock_info(adev);
3458 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3459 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3463 /* Initialize clocks */
3464 r = amdgpu_atombios_get_clock_info(adev);
3466 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3467 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3470 /* init i2c buses */
3471 if (!amdgpu_device_has_dc_support(adev))
3472 amdgpu_atombios_i2c_init(adev);
3477 r = amdgpu_fence_driver_init(adev);
3479 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3480 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3484 /* init the mode config */
3485 drm_mode_config_init(adev_to_drm(adev));
3487 r = amdgpu_device_ip_init(adev);
3489 /* failed in exclusive mode due to timeout */
3490 if (amdgpu_sriov_vf(adev) &&
3491 !amdgpu_sriov_runtime(adev) &&
3492 amdgpu_virt_mmio_blocked(adev) &&
3493 !amdgpu_virt_wait_reset(adev)) {
3494 dev_err(adev->dev, "VF exclusive mode timeout\n");
3495 /* Don't send request since VF is inactive. */
3496 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3497 adev->virt.ops = NULL;
3499 goto release_ras_con;
3501 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3502 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3503 goto release_ras_con;
3507 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3508 adev->gfx.config.max_shader_engines,
3509 adev->gfx.config.max_sh_per_se,
3510 adev->gfx.config.max_cu_per_sh,
3511 adev->gfx.cu_info.number);
3513 adev->accel_working = true;
3515 amdgpu_vm_check_compute_bug(adev);
3517 /* Initialize the buffer migration limit. */
3518 if (amdgpu_moverate >= 0)
3519 max_MBps = amdgpu_moverate;
3521 max_MBps = 8; /* Allow 8 MB/s. */
3522 /* Get a log2 for easy divisions. */
3523 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3525 amdgpu_fbdev_init(adev);
3527 r = amdgpu_pm_sysfs_init(adev);
3529 adev->pm_sysfs_en = false;
3530 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3532 adev->pm_sysfs_en = true;
3534 r = amdgpu_ucode_sysfs_init(adev);
3536 adev->ucode_sysfs_en = false;
3537 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3539 adev->ucode_sysfs_en = true;
3541 if (amdgpu_testing & 1) {
3542 if (adev->accel_working)
3543 amdgpu_test_moves(adev);
3545 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3547 if (amdgpu_benchmarking) {
3548 if (adev->accel_working)
3549 amdgpu_benchmark(adev, amdgpu_benchmarking);
3551 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3555 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3556 * Otherwise the mgpu fan boost feature will be skipped because the
3557 * gpu instance count would be too low.
3559 amdgpu_register_gpu_instance(adev);
3561 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3562 * explicit gating rather than handling it automatically.
3564 if (!adev->gmc.xgmi.pending_reset) {
3565 r = amdgpu_device_ip_late_init(adev);
3567 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3568 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3569 goto release_ras_con;
3572 amdgpu_ras_resume(adev);
3573 queue_delayed_work(system_wq, &adev->delayed_init_work,
3574 msecs_to_jiffies(AMDGPU_RESUME_MS));
3577 if (amdgpu_sriov_vf(adev))
3578 flush_delayed_work(&adev->delayed_init_work);
3580 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3582 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3584 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3585 r = amdgpu_pmu_init(adev);
3587 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3589 /* Have stored pci confspace at hand for restore in sudden PCI error */
3590 if (amdgpu_device_cache_pci_state(adev->pdev))
3591 pci_restore_state(pdev);
3593 if (adev->gmc.xgmi.pending_reset)
3594 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3595 msecs_to_jiffies(AMDGPU_RESUME_MS));
3600 amdgpu_release_ras_context(adev);
3603 amdgpu_vf_error_trans_all(adev);
3605 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3608 iounmap(adev->rmmio);
3615 * amdgpu_device_fini - tear down the driver
3617 * @adev: amdgpu_device pointer
3619 * Tear down the driver info (all asics).
3620 * Called at driver shutdown.
3622 void amdgpu_device_fini(struct amdgpu_device *adev)
3624 dev_info(adev->dev, "amdgpu: finishing device.\n");
3625 flush_delayed_work(&adev->delayed_init_work);
3626 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3627 adev->shutdown = true;
3629 kfree(adev->pci_state);
3631 /* make sure the IB tests have finished before entering exclusive mode
3632 * to avoid preemption of the IB tests
3634 if (amdgpu_sriov_vf(adev)) {
3635 amdgpu_virt_request_full_gpu(adev, false);
3636 amdgpu_virt_fini_data_exchange(adev);
3639 /* disable all interrupts */
3640 amdgpu_irq_disable_all(adev);
3641 if (adev->mode_info.mode_config_initialized) {
3642 if (!amdgpu_device_has_dc_support(adev))
3643 drm_helper_force_disable_all(adev_to_drm(adev));
3645 drm_atomic_helper_shutdown(adev_to_drm(adev));
3647 amdgpu_fence_driver_fini(adev);
3648 if (adev->pm_sysfs_en)
3649 amdgpu_pm_sysfs_fini(adev);
3650 amdgpu_fbdev_fini(adev);
3651 amdgpu_device_ip_fini(adev);
3652 release_firmware(adev->firmware.gpu_info_fw);
3653 adev->firmware.gpu_info_fw = NULL;
3654 adev->accel_working = false;
3655 /* free i2c buses */
3656 if (!amdgpu_device_has_dc_support(adev))
3657 amdgpu_i2c_fini(adev);
3659 if (amdgpu_emu_mode != 1)
3660 amdgpu_atombios_fini(adev);
3664 if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3665 vga_switcheroo_unregister_client(adev->pdev);
3666 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3668 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3669 vga_client_register(adev->pdev, NULL, NULL, NULL);
3670 iounmap(adev->rmmio);
3672 amdgpu_device_doorbell_fini(adev);
3674 if (adev->ucode_sysfs_en)
3675 amdgpu_ucode_sysfs_fini(adev);
3677 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3678 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3679 amdgpu_pmu_fini(adev);
3680 if (adev->mman.discovery_bin)
3681 amdgpu_discovery_fini(adev);
3689 * amdgpu_device_suspend - initiate device suspend
3691 * @dev: drm dev pointer
3692 * @fbcon: notify the fbdev of suspend
3694 * Puts the hw in the suspend state (all asics).
3695 * Returns 0 for success or an error on failure.
3696 * Called at driver suspend.
3698 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3700 struct amdgpu_device *adev;
3701 struct drm_crtc *crtc;
3702 struct drm_connector *connector;
3703 struct drm_connector_list_iter iter;
3706 adev = drm_to_adev(dev);
3708 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3711 adev->in_suspend = true;
3712 drm_kms_helper_poll_disable(dev);
3715 amdgpu_fbdev_set_suspend(adev, 1);
3717 cancel_delayed_work_sync(&adev->delayed_init_work);
3719 if (!amdgpu_device_has_dc_support(adev)) {
3720 /* turn off display hw */
3721 drm_modeset_lock_all(dev);
3722 drm_connector_list_iter_begin(dev, &iter);
3723 drm_for_each_connector_iter(connector, &iter)
3724 drm_helper_connector_dpms(connector,
3726 drm_connector_list_iter_end(&iter);
3727 drm_modeset_unlock_all(dev);
3728 /* unpin the front buffers and cursors */
3729 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3730 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3731 struct drm_framebuffer *fb = crtc->primary->fb;
3732 struct amdgpu_bo *robj;
3734 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3735 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3736 r = amdgpu_bo_reserve(aobj, true);
3738 amdgpu_bo_unpin(aobj);
3739 amdgpu_bo_unreserve(aobj);
3743 if (fb == NULL || fb->obj[0] == NULL) {
3746 robj = gem_to_amdgpu_bo(fb->obj[0]);
3747 /* don't unpin kernel fb objects */
3748 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3749 r = amdgpu_bo_reserve(robj, true);
3751 amdgpu_bo_unpin(robj);
3752 amdgpu_bo_unreserve(robj);
3758 amdgpu_ras_suspend(adev);
3760 r = amdgpu_device_ip_suspend_phase1(adev);
3762 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3764 /* evict vram memory */
3765 amdgpu_bo_evict_vram(adev);
3767 amdgpu_fence_driver_suspend(adev);
3769 if (adev->in_poweroff_reboot_com ||
3770 !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
3771 r = amdgpu_device_ip_suspend_phase2(adev);
3773 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
3774 /* evict remaining vram memory
3775 * This second call to evict vram is to evict the gart page table
3778 amdgpu_bo_evict_vram(adev);
3784 * amdgpu_device_resume - initiate device resume
3786 * @dev: drm dev pointer
3787 * @fbcon: notify the fbdev of resume
3789 * Bring the hw back to operating state (all asics).
3790 * Returns 0 for success or an error on failure.
3791 * Called at driver resume.
3793 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3795 struct drm_connector *connector;
3796 struct drm_connector_list_iter iter;
3797 struct amdgpu_device *adev = drm_to_adev(dev);
3798 struct drm_crtc *crtc;
3801 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3804 if (amdgpu_acpi_is_s0ix_supported(adev))
3805 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3808 if (amdgpu_device_need_post(adev)) {
3809 r = amdgpu_device_asic_init(adev);
3811 dev_err(adev->dev, "amdgpu asic init failed\n");
3814 r = amdgpu_device_ip_resume(adev);
3816 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3819 amdgpu_fence_driver_resume(adev);
3822 r = amdgpu_device_ip_late_init(adev);
3826 queue_delayed_work(system_wq, &adev->delayed_init_work,
3827 msecs_to_jiffies(AMDGPU_RESUME_MS));
3829 if (!amdgpu_device_has_dc_support(adev)) {
3831 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3832 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3834 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3835 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3836 r = amdgpu_bo_reserve(aobj, true);
3838 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3840 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3841 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3842 amdgpu_bo_unreserve(aobj);
3847 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3851 /* Make sure IB tests flushed */
3852 flush_delayed_work(&adev->delayed_init_work);
3854 /* blat the mode back in */
3856 if (!amdgpu_device_has_dc_support(adev)) {
3858 drm_helper_resume_force_mode(dev);
3860 /* turn on display hw */
3861 drm_modeset_lock_all(dev);
3863 drm_connector_list_iter_begin(dev, &iter);
3864 drm_for_each_connector_iter(connector, &iter)
3865 drm_helper_connector_dpms(connector,
3867 drm_connector_list_iter_end(&iter);
3869 drm_modeset_unlock_all(dev);
3871 amdgpu_fbdev_set_suspend(adev, 0);
3874 drm_kms_helper_poll_enable(dev);
3876 amdgpu_ras_resume(adev);
3879 * Most of the connector probing functions try to acquire runtime pm
3880 * refs to ensure that the GPU is powered on when connector polling is
3881 * performed. Since we're calling this from a runtime PM callback,
3882 * trying to acquire rpm refs will cause us to deadlock.
3884 * Since we're guaranteed to be holding the rpm lock, it's safe to
3885 * temporarily disable the rpm helpers so this doesn't deadlock us.
3888 dev->dev->power.disable_depth++;
3890 if (!amdgpu_device_has_dc_support(adev))
3891 drm_helper_hpd_irq_event(dev);
3893 drm_kms_helper_hotplug_event(dev);
3895 dev->dev->power.disable_depth--;
3897 adev->in_suspend = false;
3903 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3905 * @adev: amdgpu_device pointer
3907 * The list of all the hardware IPs that make up the asic is walked and
3908 * the check_soft_reset callbacks are run. check_soft_reset determines
3909 * if the asic is still hung or not.
3910 * Returns true if any of the IPs are still in a hung state, false if not.
3912 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3915 bool asic_hang = false;
3917 if (amdgpu_sriov_vf(adev))
3920 if (amdgpu_asic_need_full_reset(adev))
3923 for (i = 0; i < adev->num_ip_blocks; i++) {
3924 if (!adev->ip_blocks[i].status.valid)
3926 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3927 adev->ip_blocks[i].status.hang =
3928 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3929 if (adev->ip_blocks[i].status.hang) {
3930 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3938 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3940 * @adev: amdgpu_device pointer
3942 * The list of all the hardware IPs that make up the asic is walked and the
3943 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3944 * handles any IP specific hardware or software state changes that are
3945 * necessary for a soft reset to succeed.
3946 * Returns 0 on success, negative error code on failure.
3948 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3952 for (i = 0; i < adev->num_ip_blocks; i++) {
3953 if (!adev->ip_blocks[i].status.valid)
3955 if (adev->ip_blocks[i].status.hang &&
3956 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3957 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3967 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3969 * @adev: amdgpu_device pointer
3971 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3972 * reset is necessary to recover.
3973 * Returns true if a full asic reset is required, false if not.
3975 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3979 if (amdgpu_asic_need_full_reset(adev))
3982 for (i = 0; i < adev->num_ip_blocks; i++) {
3983 if (!adev->ip_blocks[i].status.valid)
3985 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3986 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3987 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3988 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3989 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3990 if (adev->ip_blocks[i].status.hang) {
3991 dev_info(adev->dev, "Some blocks need a full reset!\n");
4000 * amdgpu_device_ip_soft_reset - do a soft reset
4002 * @adev: amdgpu_device pointer
4004 * The list of all the hardware IPs that make up the asic is walked and the
4005 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4006 * IP specific hardware or software state changes that are necessary to soft reset the IP.
4008 * Returns 0 on success, negative error code on failure.
4010 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4014 for (i = 0; i < adev->num_ip_blocks; i++) {
4015 if (!adev->ip_blocks[i].status.valid)
4017 if (adev->ip_blocks[i].status.hang &&
4018 adev->ip_blocks[i].version->funcs->soft_reset) {
4019 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4029 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4031 * @adev: amdgpu_device pointer
4033 * The list of all the hardware IPs that make up the asic is walked and the
4034 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4035 * handles any IP specific hardware or software state changes that are
4036 * necessary after the IP has been soft reset.
4037 * Returns 0 on success, negative error code on failure.
4039 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4043 for (i = 0; i < adev->num_ip_blocks; i++) {
4044 if (!adev->ip_blocks[i].status.valid)
4046 if (adev->ip_blocks[i].status.hang &&
4047 adev->ip_blocks[i].version->funcs->post_soft_reset)
4048 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4057 * amdgpu_device_recover_vram - Recover some VRAM contents
4059 * @adev: amdgpu_device pointer
4061 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4062 * restore things like GPUVM page tables after a GPU reset where
4063 * the contents of VRAM might be lost.
4066 * 0 on success, negative error code on failure.
4068 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4070 struct dma_fence *fence = NULL, *next = NULL;
4071 struct amdgpu_bo *shadow;
4074 if (amdgpu_sriov_runtime(adev))
4075 tmo = msecs_to_jiffies(8000);
4077 tmo = msecs_to_jiffies(100);
4079 dev_info(adev->dev, "recover vram bo from shadow start\n");
4080 mutex_lock(&adev->shadow_list_lock);
4081 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4083 /* No need to recover an evicted BO */
4084 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4085 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4086 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4089 r = amdgpu_bo_restore_shadow(shadow, &next);
4094 tmo = dma_fence_wait_timeout(fence, false, tmo);
4095 dma_fence_put(fence);
4100 } else if (tmo < 0) {
4108 mutex_unlock(&adev->shadow_list_lock);
4111 tmo = dma_fence_wait_timeout(fence, false, tmo);
4112 dma_fence_put(fence);
4114 if (r < 0 || tmo <= 0) {
4115 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4119 dev_info(adev->dev, "recover vram bo from shadow done\n");
4125 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4127 * @adev: amdgpu_device pointer
4128 * @from_hypervisor: request from hypervisor
4130 * Do a VF FLR and reinitialize the asic.
4131 * Returns 0 on success, a negative error code otherwise.
4133 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4134 bool from_hypervisor)
4138 if (from_hypervisor)
4139 r = amdgpu_virt_request_full_gpu(adev, true);
4141 r = amdgpu_virt_reset_gpu(adev);
4145 amdgpu_amdkfd_pre_reset(adev);
4147 /* Resume IP prior to SMC */
4148 r = amdgpu_device_ip_reinit_early_sriov(adev);
4152 amdgpu_virt_init_data_exchange(adev);
4153 /* we need to recover the gart prior to running SMC/CP/SDMA resume */
4154 amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4156 r = amdgpu_device_fw_loading(adev);
4160 /* now we are okay to resume SMC/CP/SDMA */
4161 r = amdgpu_device_ip_reinit_late_sriov(adev);
4165 amdgpu_irq_gpu_reset_resume_helper(adev);
4166 r = amdgpu_ib_ring_tests(adev);
4167 amdgpu_amdkfd_post_reset(adev);
4170 amdgpu_virt_release_full_gpu(adev, true);
4171 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4172 amdgpu_inc_vram_lost(adev);
4173 r = amdgpu_device_recover_vram(adev);
4180 * amdgpu_device_has_job_running - check if there is any job in the pending list
4182 * @adev: amdgpu_device pointer
4184 * Check if there is any job in the pending list.
4186 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4189 struct drm_sched_job *job;
4191 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4192 struct amdgpu_ring *ring = adev->rings[i];
4194 if (!ring || !ring->sched.thread)
4197 spin_lock(&ring->sched.job_list_lock);
4198 job = list_first_entry_or_null(&ring->sched.pending_list,
4199 struct drm_sched_job, list);
4200 spin_unlock(&ring->sched.job_list_lock);
4208 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4210 * @adev: amdgpu_device pointer
4212 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover the asic.
4215 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4217 if (!amdgpu_device_ip_check_soft_reset(adev)) {
4218 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4222 if (amdgpu_gpu_recovery == 0)
4225 if (amdgpu_sriov_vf(adev))
4228 if (amdgpu_gpu_recovery == -1) {
4229 switch (adev->asic_type) {
4235 case CHIP_POLARIS10:
4236 case CHIP_POLARIS11:
4237 case CHIP_POLARIS12:
4248 case CHIP_SIENNA_CICHLID:
4249 case CHIP_NAVY_FLOUNDER:
4250 case CHIP_DIMGREY_CAVEFISH:
4261 dev_info(adev->dev, "GPU recovery disabled.\n");
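/*
 * In short: amdgpu.gpu_recovery=1 always attempts recovery, 0 disables
 * it on bare metal, and the default of -1 enables it only for the ASICs
 * listed in the switch above (and always under SR-IOV).
 */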
4265 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4270 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4272 dev_info(adev->dev, "GPU mode1 reset\n");
4275 pci_clear_master(adev->pdev);
4277 amdgpu_device_cache_pci_state(adev->pdev);
4279 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4280 dev_info(adev->dev, "GPU smu mode1 reset\n");
4281 ret = amdgpu_dpm_mode1_reset(adev);
4283 dev_info(adev->dev, "GPU psp mode1 reset\n");
4284 ret = psp_gpu_reset(adev);
4288 dev_err(adev->dev, "GPU mode1 reset failed\n");
4290 amdgpu_device_load_pci_state(adev->pdev);
4292 /* wait for asic to come out of reset */
4293 for (i = 0; i < adev->usec_timeout; i++) {
4294 u32 memsize = adev->nbio.funcs->get_memsize(adev);
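		/* an all-ones readback means the asic has not come out of reset yet */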
4296 if (memsize != 0xffffffff)
4301 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4305 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4306 struct amdgpu_job *job,
4307 bool *need_full_reset_arg)
4310 bool need_full_reset = *need_full_reset_arg;
4312 /* no need to dump if the device is not in a good state during the probe period */
4313 if (!adev->gmc.xgmi.pending_reset)
4314 amdgpu_debugfs_wait_dump(adev);
4316 if (amdgpu_sriov_vf(adev)) {
4317 /* stop the data exchange thread */
4318 amdgpu_virt_fini_data_exchange(adev);
4321 /* block all schedulers and reset given job's ring */
4322 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4323 struct amdgpu_ring *ring = adev->rings[i];
4325 if (!ring || !ring->sched.thread)
4328 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4329 amdgpu_fence_driver_force_completion(ring);
4333 drm_sched_increase_karma(&job->base);
4335 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4336 if (!amdgpu_sriov_vf(adev)) {
4338 if (!need_full_reset)
4339 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4341 if (!need_full_reset) {
4342 amdgpu_device_ip_pre_soft_reset(adev);
4343 r = amdgpu_device_ip_soft_reset(adev);
4344 amdgpu_device_ip_post_soft_reset(adev);
4345 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4346 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4347 need_full_reset = true;
4351 if (need_full_reset)
4352 r = amdgpu_device_ip_suspend(adev);
4354 *need_full_reset_arg = need_full_reset;
4360 int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4361 struct list_head *device_list_handle,
4362 bool *need_full_reset_arg,
4365 struct amdgpu_device *tmp_adev = NULL;
4366 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4370 * ASIC reset has to be done on all XGMI hive nodes ASAP
4371 * to allow proper link negotiation in FW (within 1 sec)
4373 if (!skip_hw_reset && need_full_reset) {
4374 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4375 /* For XGMI run all resets in parallel to speed up the process */
4376 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4377 tmp_adev->gmc.xgmi.pending_reset = false;
4378 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4381 r = amdgpu_asic_reset(tmp_adev);
4384 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4385 r, adev_to_drm(tmp_adev)->unique);
4390 /* For XGMI wait for all resets to complete before proceeding */
4392 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4393 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4394 flush_work(&tmp_adev->xgmi_reset_work);
4395 r = tmp_adev->asic_reset_res;
4403 if (!r && amdgpu_ras_intr_triggered()) {
4404 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4405 if (tmp_adev->mmhub.funcs &&
4406 tmp_adev->mmhub.funcs->reset_ras_error_count)
4407 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4410 amdgpu_ras_intr_cleared();
4413 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4414 if (need_full_reset) {
4416 r = amdgpu_device_asic_init(tmp_adev);
4418 dev_warn(tmp_adev->dev, "asic atom init failed!");
4420 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4421 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4425 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4427 DRM_INFO("VRAM is lost due to GPU reset!\n");
4428 amdgpu_inc_vram_lost(tmp_adev);
4431 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4435 r = amdgpu_device_fw_loading(tmp_adev);
4439 r = amdgpu_device_ip_resume_phase2(tmp_adev);
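				/* re-arm the reset magic so a future reset can detect VRAM loss again */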
4444 amdgpu_device_fill_reset_magic(tmp_adev);
4447 * Add this ASIC as tracked, as the reset has already
4448 * completed successfully.
4450 amdgpu_register_gpu_instance(tmp_adev);
4452 if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4453 amdgpu_xgmi_add_device(tmp_adev);
4455 r = amdgpu_device_ip_late_init(tmp_adev);
4459 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4462 * The GPU enters a bad state once the number of faulty pages
4463 * detected by ECC reaches the threshold, and RAS
4464 * recovery is scheduled next. So add one check
4465 * here to break recovery if it indeed exceeds the
4466 * bad page threshold, and remind the user to
4467 * retire this GPU or set a bigger
4468 * bad_page_threshold value to fix this the next
4469 * time the driver is probed.
4471 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4473 amdgpu_ras_resume(tmp_adev);
4479 /* Update PSP FW topology after reset */
4480 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4481 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}
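
/*
 * Take the per-device reset lock. This is a trylock: if another reset
 * already owns the device (in_gpu_reset was already 1) it fails instead
 * of blocking, so concurrent TDR handlers bail out rather than queue up.
 * On success the caller holds adev->reset_sem and mp1_state reflects the
 * reset method in use.
 */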
static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive)
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	else
		down_write(&adev->reset_sem);

	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}
/*
 * Lock a list of amdgpu devices in a hive safely. If this is not a hive
 * with multiple nodes, it behaves like amdgpu_device_lock_adev.
 *
 * Unlock won't require a roll back.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;
roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the locking iteration broke in the middle of a hive,
		 * it may mean there is a race, or that a hive device locked
		 * up independently. We may or may not be in trouble, so roll
		 * back the locks taken so far and emit a warning.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head)
			amdgpu_device_unlock_adev(tmp_adev);
	}
	return -EAGAIN;
}
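
/*
 * The GPU's HDMI/DP audio controller is a separate PCI function on the
 * same bus and device as the graphics function (typically function 1),
 * which is why devfn 1 is looked up here and in the suspend counterpart
 * below.
 */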
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}
}
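
/*
 * Returns 0 only when the audio function was actually put into runtime
 * suspend; amdgpu_device_gpu_recover() uses that (audio_suspended) to
 * decide whether a matching resume is needed once the reset completes.
 */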
static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer from the audio issue if the audio device
	 * is not properly suspended.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4S interval will be used. 3S is the audio
		 * controller's default autosuspend delay setting, so the
		 * 4S used here is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}
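
/*
 * Note: the main callers of amdgpu_device_gpu_recover() are the job
 * timeout handler (amdgpu_job_timedout(), which passes the hanging job)
 * and the RAS recovery worker, which passes job == NULL.
 */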
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool need_full_reset = false;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}
	dev_info(adev->dev, "GPU %s begin!\n",
		 need_emergency_restart ? "jobs stop" : "reset");

	/*
	 * Here we trylock to avoid a chain of resets executing from either
	 * jobs triggered on different adevs in an XGMI hive or jobs on
	 * different schedulers for the same device while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices for an XGMI hive, so that should take care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				 job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}

	/*
	 * Lock the device before we try to operate on the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);

		/* even though we skipped this reset, we still need to mark the job as guilty */
		if (job)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}
	/*
	 * Build the list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}
	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this we may touch
		 * the audio hardware from behind the audio driver's
		 * back and trigger audio codec errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark these ASICs as untracked first, and add them
		 * back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;
	/*
	 * Must check the guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to the parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 (tmp_adev == adev) ? job : NULL,
						 &need_full_reset);
		/* TODO: should we stop? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed. */
	/* TODO: implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:
	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
				 atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}
skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/*
		 * kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it wasn't
		 * initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}
/**
 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIe capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIe config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);
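
	/*
	 * The gen mask is the union of two bit groups: the
	 * CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_* bits describe what the ASIC
	 * itself supports (from pcie_get_speed_cap() on the device), while
	 * the CAIL_PCIE_LINK_SPEED_SUPPORT_* bits describe what the
	 * platform/slot supports (from the bandwidth query above).
	 */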
	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
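
/*
 * BACO ("Bus Active, Chip Off") powers the chip down while keeping the
 * PCIe interface alive. When RAS is active, doorbell interrupts are
 * disabled across entry and re-enabled after exit, presumably so no
 * doorbell traffic is handled while the chip is powered down.
 */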
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}
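
/*
 * The callbacks below implement the standard PCI error recovery sequence
 * (see Documentation/PCI/pci-error-recovery.rst): error_detected() ->
 * mmio_enabled() (optional) -> slot_reset() -> resume().
 */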
/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Cancel and wait for all TDRs in progress if failing to
		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to the GPU during PCI error recovery
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling as we do for a regular GPU
		 * reset for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/*
	 * This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset the slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	bool need_full_reset = true;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	adev->in_pci_err_recovery = true;
	r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
	adev->in_pci_err_recovery = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to resume
 * normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}
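
/**
 * amdgpu_device_cache_pci_state - cache the GPU's PCI config space
 * @pdev: PCI device struct
 *
 * Saves the config space with pci_save_state() and stores a kernel copy
 * via pci_store_saved_state() in adev->pci_state, so that
 * amdgpu_device_load_pci_state() can re-apply it after a slot reset.
 *
 * Return: true on success, false otherwise.
 */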
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);
		adev->pci_state = pci_store_saved_state(pdev);
		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}
bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);
	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}