/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

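/* Enable or disable UMC error reporting by toggling the low seven bits in
 * each of the 32 ctrl/ctrl-mask registers listed above (eight register
 * blocks of four, 0x800 apart; presumably one entry per UMC channel
 * instance). */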
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

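/* RAS callback for an uncorrectable UMC error: flag the SRAM ECC event for
 * KFD and schedule a GPU reset to recover. */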
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev, 0);
	return AMDGPU_RAS_UE;
}

static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

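/* Toggle the protection-fault interrupt enables in all 16 per-VMID context
 * control registers (VM_CONTEXT0_CNTL..VM_CONTEXT15_CNTL) on every vmhub,
 * so VM faults either raise an interrupt or are silently dropped. */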
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

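/* IV-ring handler for VM protection faults: reassemble the 48-bit fault
 * address from src_data, drop retry faults already reported for this
 * address/PASID, pick the faulting hub from the IH client id, and print a
 * rate-limited fault report. The L2 protection fault status register is
 * only read and cleared on bare metal; a VF cannot access it. */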
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

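/* Build the VM_INVALIDATE_ENG*_REQ value for @vmid: invalidate the L2 PTEs
 * and all three PDE levels plus the L1 PTEs, without clearing the
 * protection fault status address. */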
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < adev->num_vmhubs; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

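/* Same invalidation as gmc_v9_0_flush_gpu_tlb(), but emitted on a ring:
 * write the page directory base for @vmid, then write the invalidate
 * request and wait for the ack on the engine owned by this ring. */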
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

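/* Update the IH VMID->PASID lookup table from a ring, so interrupt entries
 * carry the right PASID. GFX and MM hubs use separate LUTs. */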
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

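/* Convert a PDE address/flags pair to the hardware layout: rebase VRAM
 * addresses to the MC address space, and when translate_further is enabled
 * set the block fragment size on PDB1 entries and the translate-further
 * bit on PDB0 entries. */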
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first
	 * 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check the related code in gmc_v9_0_get_vbios_fb_size and
	 * gmc_v9_0_late_init.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

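/* Give every ring its own VM invalidation engine on its vmhub, taken from
 * the per-hub free bitmaps. Engine 17 is used directly by the CPU path in
 * gmc_v9_0_flush_gpu_tlb() and is presumably excluded from those bitmaps. */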
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

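/* One-time RAS setup for the UMC block: enable the RAS feature, register
 * the interrupt handler and the sysfs/debugfs nodes, then take the ECC
 * interrupt. On the resume path (*ras_if already allocated) only the
 * feature enable and the IRQ are redone. */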
static int gmc_v9_0_ecc_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = &adev->gmc.ras_if;
	struct ras_ih_if ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "umc_err_count",
		.debugfs_name = "umc_err_inject",
	};
	struct ras_common_if ras_block = {
		.block = AMDGPU_RAS_BLOCK__UMC,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "umc",
	};
	int r;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
		return 0;
	}
	/* handle resume path. */
	if (*ras_if) {
		/* resend ras TA enable cmd during resume.
		 * prepare to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset. will run again. */
				amdgpu_ras_request_reset_on_boot(adev,
						AMDGPU_RAS_BLOCK__UMC);
				return 0;
			}
			/* fail to enable ras, cleanup all. */
			goto irq;
		}
		/* enable successfully. continue. */
		goto resume;
	}

	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			amdgpu_ras_request_reset_on_boot(adev,
					AMDGPU_RAS_BLOCK__UMC);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info.head = **ras_if;

	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	if (r)
		goto interrupt;

	amdgpu_ras_debugfs_create(adev, &fs_info);

	r = amdgpu_ras_sysfs_create(adev, &fs_info);
	if (r)
		goto sysfs;
resume:
	r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
	if (r)
		goto irq;

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

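/* Place VRAM, GART and the AGP aperture in the GPU's physical address
 * space. On XGMI systems the FB base is offset by this node's segment so
 * that all nodes see one contiguous address range. */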
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->asic_type == CHIP_ARCTURUS)
			base = mmhub_v9_4_get_fb_location(adev);
		else
			base = mmhub_v1_0_get_fb_location(adev);
	}
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

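/* Estimate how much VRAM the vBIOS/pre-OS console is still scanning out
 * (VGA emulation, or the active viewport at 4 bytes per pixel), so that
 * memory can be reserved until the driver takes over the display. */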
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			      &adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits.
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_virt_support_skip_setting(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		return 0;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		return;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};