/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

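/*
 * Enable or disable UMC ECC error reporting by toggling the interrupt
 * bits in every MCUMC_CTRL and MCUMC_CTRL_MASK instance listed in the
 * address tables above.
 */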
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

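/*
 * RAS callback for UMC errors. Unless GFX RAS handles the event, this
 * flags an SRAM ECC event to KFD, queries the UMC error counts and
 * addresses (which also clears the error status) and schedules a GPU
 * reset when an uncorrectable error was counted.
 */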
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct ras_err_data *err_data,
		struct amdgpu_iv_entry *entry)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->umc.funcs->query_ras_error_count)
			adev->umc.funcs->query_ras_error_count(adev, err_data);
		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.funcs->query_ras_error_address)
			adev->umc.funcs->query_ras_error_address(adev, err_data);

		/* only uncorrectable errors need a gpu reset */
		if (err_data->ue_count)
			amdgpu_ras_reset_gpu(adev, 0);
	}

	return AMDGPU_RAS_SUCCESS;
}

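/* Forward an ECC interrupt to the RAS dispatcher for the UMC block. */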
static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

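/*
 * Enable or disable the VM protection fault interrupt sources in
 * VM_CONTEXT*_CNTL for all 16 contexts on every VM hub.
 */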
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

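/*
 * Build the VM_INVALIDATE_ENG0_REQ value for a TLB flush: target a
 * single VMID with the given flush type and invalidate the L1 PTE and
 * L2 PTE/PDE caches, without clearing the fault status address.
 */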
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with a certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
						   1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

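/*
 * Emit a write mapping @vmid to @pasid in the IH VMID LUT of the ring's
 * hub, so faults on that VMID can be attributed to the right PASID.
 */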
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_RW:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

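/*
 * Fix up a PDE for the hardware: rebase VRAM addresses into the MC
 * address space and, if further translation is enabled, set the block
 * fragment size on PDB1 entries and the translate-further bit on PDB0.
 */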
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub_funcs = &mmhub_v1_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume;
	 * this overwrites GART, which by default gets placed in the
	 * first 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

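/*
 * Give each ring a dedicated VM invalidation engine on its hub, taken
 * from the per-hub bitmaps of engines that are free for this purpose.
 */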
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

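/*
 * Late RAS init for the UMC block: allocate the common ras_if, register
 * sysfs/debugfs nodes and the interrupt handler, and keep the ECC
 * interrupt enabled only when UMC RAS is actually supported.
 */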
static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_fs_if umc_fs_info = {
		.sysfs_name = "umc_err_count",
		.debugfs_name = "umc_err_inject",
	};
	struct ras_ih_if umc_ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};

	if (!adev->gmc.umc_ras_if) {
		adev->gmc.umc_ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.umc_ras_if)
			return -ENOMEM;
		adev->gmc.umc_ras_if->block = AMDGPU_RAS_BLOCK__UMC;
		adev->gmc.umc_ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.umc_ras_if->sub_block_index = 0;
		strcpy(adev->gmc.umc_ras_if->name, "umc");
	}
	umc_ih_info.head = umc_fs_info.head = *adev->gmc.umc_ras_if;

	r = amdgpu_ras_late_init(adev, adev->gmc.umc_ras_if,
				 &umc_fs_info, &umc_ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gmc.umc_ras_if->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto umc_late_fini;
	} else {
		/* free the ras_if if UMC RAS is not supported */
		kfree(adev->gmc.umc_ras_if);
		adev->gmc.umc_ras_if = NULL;
	}

	if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
		r = adev->mmhub_funcs->ras_late_init(adev);
		if (r)
			return r;
	}
	return 0;

umc_late_fini:
	amdgpu_ras_late_fini(adev, adev->gmc.umc_ras_if, &umc_ih_info);
free:
	kfree(adev->gmc.umc_ras_if);
	adev->gmc.umc_ras_if = NULL;
	return r;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

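/*
 * Estimate how much VRAM the VBIOS/pre-OS console is still scanning out
 * so it can stay reserved until the driver takes over the display;
 * returns 0 when the pre-OS buffer would consume most of VRAM.
 */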
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

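/*
 * sw_init selects the per-ASIC VM layout (hub count, VM size and page
 * table depth), registers the fault and ECC interrupt sources, sizes
 * VRAM/GART and initializes the GART table and VM manager.
 */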
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			      &adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
	    adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
	    adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};