/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L
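/*
 * Decoding example (hypothetical raw value, for illustration only): a
 * HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION reading of 0x04380780 decodes,
 * using the shifts/masks above, to
 *   width  = (0x04380780 & 0x00003FFF) >> 0x0  = 0x0780 = 1920
 *   height = (0x04380780 & 0x3FFF0000) >> 0x10 = 0x0438 = 1080
 * i.e. a 1920x1080 primary surface viewport.
 */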
/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS	8
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
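/*
 * Layout note (inferred from the arithmetic above, not from UMC
 * documentation): each register has four per-channel instances spaced
 * 0x800 apart, and the four UMC groups sit 0x40000 apart, e.g.
 * UMCCH_UMC_CONFIG_ADDR5 = 0x00054040 + 0x00000800 = 0x00054840.
 */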
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}
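/*
 * Note: the fixed "eng = 17" above is the invalidation engine reserved
 * for GART flushes in the allocation table documented in
 * gmc_v9_0_late_init() ("Engine 17: Gart flushes"), so this path never
 * collides with the per-ring engines handed out there.
 */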
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/**
 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
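/*
 * Worked example (illustrative, using the PTE layout documented above):
 * for a 4K page at physical address 0x12345000 with valid (bit 0),
 * exe (bit 4), read (bit 5) and write (bit 6) set, flags = 0x71 and
 *   value = (0x12345000 & 0x0000FFFFFFFFF000ULL) | 0x71 = 0x12345071.
 */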
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
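/*
 * For reference: 4ULL << 30 is 4 GiB, so the apertures set up above span
 *   shared:  0x2000000000000000 .. 0x20000000FFFFFFFF
 *   private: 0x1000000000000000 .. 0x10000000FFFFFFFF
 */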
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: reg_val: 0x%08x, EccDis: 0x%08x\n",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: reg_val: 0x%08x, DramReady: 0x%08x\n",
			  reg_val, field_val);
		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: reg_val: 0x%08x, WrEccEn: 0x%08x, RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);
		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}
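/*
 * Note: "lost_sheep" counts every channel that fails any of the three
 * checks above (EccDis set, DramReady clear, Wr/RdEccEn clear), so ECC
 * is reported as available only when all channels pass.
 */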
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return true;
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
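/*
 * Sizing note: with per-ring engines starting at 4 and the BUG_ON above
 * requiring the next free engine to stay <= 16, at most 12 rings per
 * vmhub can receive invalidation engines (4..15), keeping 16 free for
 * KFD and 17 for GART flushes.
 */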
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports VRAM size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
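/*
 * Example (typical Vega10 numbers, for illustration only): 16 HBM2
 * channels * 128-bit channel size gives vram_width = 2048 bits.
 */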
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
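/*
 * Sizing sanity check (arithmetic only): with the default 512 MiB GART
 * and 4 KiB pages, num_gpu_pages is 131072 and the page table itself
 * occupies 131072 * 8 bytes = 1 MiB of VRAM.
 */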
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
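/*
 * Example (illustrative): a 1920x1080 pre-OS framebuffer viewport gives
 * size = 1080 * 1920 * 4 = 8294400 bytes (~7.9 MiB), which is reserved
 * as stolen memory unless it would consume almost all of VRAM.
 */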
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
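	/*
	 * For reference: DMA_BIT_MASK(44) = 0xFFFFFFFFFFF, i.e. the 44-bit
	 * path can address 16 TiB; the dma32 fallback covers 4 GiB.
	 */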
	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};