/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

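/*
 * Golden register settings are { register offset, AND mask, OR value }
 * triplets, applied three entries at a time by
 * amdgpu_device_program_register_sequence().
 */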
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

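/*
 * ECC control registers, one per UMC channel: eight UMC instances with
 * four channels each, at a 0x800 register stride.
 */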
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

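/* Matching ECC control mask registers, same instance/channel layout. */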
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

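/* Per-channel ECC status registers, again one register per UMC channel. */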
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct ras_err_data *err_data,
		struct amdgpu_iv_entry *entry)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return AMDGPU_RAS_SUCCESS;

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	if (adev->umc.funcs &&
	    adev->umc.funcs->query_ras_error_count)
		adev->umc.funcs->query_ras_error_count(adev, err_data);

	if (adev->umc.funcs &&
	    adev->umc.funcs->query_ras_error_address &&
	    adev->umc.max_ras_err_cnt_per_query) {
		err_data->err_addr =
			kcalloc(adev->umc.max_ras_err_cnt_per_query,
				sizeof(struct eeprom_table_record), GFP_KERNEL);
		/* still call query_ras_error_address to clear error status
		 * even if a NOMEM error is encountered
		 */
		if (!err_data->err_addr)
			DRM_WARN("Failed to alloc memory for umc error address record!\n");

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		adev->umc.funcs->query_ras_error_address(adev, err_data);
	}

	/* only uncorrectable errors need a gpu reset */
	if (err_data->ue_count) {
		if (err_data->err_addr_cnt &&
		    amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
					     err_data->err_addr_cnt))
			DRM_WARN("Failed to add ras bad page!\n");

		amdgpu_ras_reset_gpu(adev, 0);
	}

	kfree(err_data->err_addr);
	return AMDGPU_RAS_SUCCESS;
}

static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
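
	/*
	 * Toggle these fault-interrupt enable bits in the VM_CONTEXT*_CNTL
	 * register of all 16 VM contexts on every VM hub.
	 */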
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

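/*
 * Build the VM_INVALIDATE_ENG*_REQ value that invalidates every page table
 * level (L1 PTEs, L2 PTEs and PDE0/1/2) for a single VMID.
 */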
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
						   1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

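/*
 * Ring-based variant of the TLB flush above: the page directory address and
 * the invalidate request are emitted as ring packets, and the wait for the
 * per-VMID ack bit happens on the ring as well.
 */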
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

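/*
 * Emit a write of the VMID-to-PASID mapping into the IH lookup table, so
 * the interrupt handler can translate a faulting VMID back to a PASID.
 */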
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

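/*
 * Adjust a PDE address and its flags for the given page-table level: VRAM
 * addresses get the GPU physical offset applied, and with translate_further
 * the extra level needs the translate-further (TF) or block fragment size
 * (BFS) bits set.
 */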
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub_funcs = &mmhub_v1_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
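
	/*
	 * Fixed SOC15 virtual aperture layout: a 4 GB shared (SVM) aperture
	 * and a 4 GB private aperture at fixed base addresses.
	 */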
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;
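
	/*
	 * Give each ring the lowest free invalidation engine on its VM hub;
	 * vm_inv_engs tracks the remaining free engines as a per-hub bitmap.
	 */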
	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_ih_if umc_ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};

	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
		r = adev->umc.funcs->ras_late_init(adev, &umc_ih_info);
		if (r)
			return r;
	}

	if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
		r = adev->mmhub_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return amdgpu_xgmi_ras_late_init(adev);
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* memsize is reported in MB, convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
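	/* GART entries are uncached and executable by default */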
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * For 4-level page support the vm size is 256TB (48 bit),
		 * the maximum size of Vega10, with a block size of 512 (9 bit).
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			      &adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
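
	/* The DMA mask is 44 bits: the GPU can address at most 44 bits of
	 * system memory.
	 */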
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
	    adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
	    adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};