Merge tag 'drm-misc-next-2018-02-13' of git://anongit.freedesktop.org/drm/drm-misc...
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / amdgpu / gmc_v9_0.c
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <drm/drm_cache.h>
25 #include "amdgpu.h"
26 #include "gmc_v9_0.h"
27 #include "amdgpu_atomfirmware.h"
28
29 #include "hdp/hdp_4_0_offset.h"
30 #include "hdp/hdp_4_0_sh_mask.h"
31 #include "gc/gc_9_0_sh_mask.h"
32 #include "dce/dce_12_0_offset.h"
33 #include "dce/dce_12_0_sh_mask.h"
34 #include "vega10_enum.h"
35 #include "mmhub/mmhub_1_0_offset.h"
36 #include "athub/athub_1_0_offset.h"
37
38 #include "soc15.h"
39 #include "soc15_common.h"
40 #include "umc/umc_6_0_sh_mask.h"
41
42 #include "gfxhub_v1_0.h"
43 #include "mmhub_v1_0.h"
44
45 #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
46 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
47 //DF_CS_AON0_DramBaseAddress0
48 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
49 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
50 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
51 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
52 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
53 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
54 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
55 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
56 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
57 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
58
59 /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
60 #define AMDGPU_NUM_OF_VMIDS                     8
61
/* HDP golden register settings: triples of { reg offset, AND mask, OR value }. */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
75
/* MMHUB 1.0.0 golden register settings (applied via soc15 golden-init helpers). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
81
/* ATHUB 1.0.0 golden register settings (applied via soc15 golden-init helpers). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
87
88 /* Ecc related register addresses, (BASE + reg offset) */
89 /* Universal Memory Controller caps (may be fused). */
90 /* UMCCH:UmcLocalCap */
91 #define UMCLOCALCAPS_ADDR0      (0x00014306 + 0x00000000)
92 #define UMCLOCALCAPS_ADDR1      (0x00014306 + 0x00000800)
93 #define UMCLOCALCAPS_ADDR2      (0x00014306 + 0x00001000)
94 #define UMCLOCALCAPS_ADDR3      (0x00014306 + 0x00001800)
95 #define UMCLOCALCAPS_ADDR4      (0x00054306 + 0x00000000)
96 #define UMCLOCALCAPS_ADDR5      (0x00054306 + 0x00000800)
97 #define UMCLOCALCAPS_ADDR6      (0x00054306 + 0x00001000)
98 #define UMCLOCALCAPS_ADDR7      (0x00054306 + 0x00001800)
99 #define UMCLOCALCAPS_ADDR8      (0x00094306 + 0x00000000)
100 #define UMCLOCALCAPS_ADDR9      (0x00094306 + 0x00000800)
101 #define UMCLOCALCAPS_ADDR10     (0x00094306 + 0x00001000)
102 #define UMCLOCALCAPS_ADDR11     (0x00094306 + 0x00001800)
103 #define UMCLOCALCAPS_ADDR12     (0x000d4306 + 0x00000000)
104 #define UMCLOCALCAPS_ADDR13     (0x000d4306 + 0x00000800)
105 #define UMCLOCALCAPS_ADDR14     (0x000d4306 + 0x00001000)
106 #define UMCLOCALCAPS_ADDR15     (0x000d4306 + 0x00001800)
107
108 /* Universal Memory Controller Channel config. */
109 /* UMCCH:UMC_CONFIG */
110 #define UMCCH_UMC_CONFIG_ADDR0  (0x00014040 + 0x00000000)
111 #define UMCCH_UMC_CONFIG_ADDR1  (0x00014040 + 0x00000800)
112 #define UMCCH_UMC_CONFIG_ADDR2  (0x00014040 + 0x00001000)
113 #define UMCCH_UMC_CONFIG_ADDR3  (0x00014040 + 0x00001800)
114 #define UMCCH_UMC_CONFIG_ADDR4  (0x00054040 + 0x00000000)
115 #define UMCCH_UMC_CONFIG_ADDR5  (0x00054040 + 0x00000800)
116 #define UMCCH_UMC_CONFIG_ADDR6  (0x00054040 + 0x00001000)
117 #define UMCCH_UMC_CONFIG_ADDR7  (0x00054040 + 0x00001800)
118 #define UMCCH_UMC_CONFIG_ADDR8  (0x00094040 + 0x00000000)
119 #define UMCCH_UMC_CONFIG_ADDR9  (0x00094040 + 0x00000800)
120 #define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
121 #define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
122 #define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
123 #define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
124 #define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
125 #define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
126
127 /* Universal Memory Controller Channel Ecc config. */
128 /* UMCCH:EccCtrl */
129 #define UMCCH_ECCCTRL_ADDR0     (0x00014053 + 0x00000000)
130 #define UMCCH_ECCCTRL_ADDR1     (0x00014053 + 0x00000800)
131 #define UMCCH_ECCCTRL_ADDR2     (0x00014053 + 0x00001000)
132 #define UMCCH_ECCCTRL_ADDR3     (0x00014053 + 0x00001800)
133 #define UMCCH_ECCCTRL_ADDR4     (0x00054053 + 0x00000000)
134 #define UMCCH_ECCCTRL_ADDR5     (0x00054053 + 0x00000800)
135 #define UMCCH_ECCCTRL_ADDR6     (0x00054053 + 0x00001000)
136 #define UMCCH_ECCCTRL_ADDR7     (0x00054053 + 0x00001800)
137 #define UMCCH_ECCCTRL_ADDR8     (0x00094053 + 0x00000000)
138 #define UMCCH_ECCCTRL_ADDR9     (0x00094053 + 0x00000800)
139 #define UMCCH_ECCCTRL_ADDR10    (0x00094053 + 0x00001000)
140 #define UMCCH_ECCCTRL_ADDR11    (0x00094053 + 0x00001800)
141 #define UMCCH_ECCCTRL_ADDR12    (0x000d4053 + 0x00000000)
142 #define UMCCH_ECCCTRL_ADDR13    (0x000d4053 + 0x00000800)
143 #define UMCCH_ECCCTRL_ADDR14    (0x000d4053 + 0x00001000)
144 #define UMCCH_ECCCTRL_ADDR15    (0x000d4053 + 0x00001800)
145
/* UmcLocalCap register address for each of the 16 UMC channels. */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
164
/* UMC_CONFIG register address for each of the 16 UMC channels. */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
183
/* EccCtrl register address for each of the 16 UMC channels. */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
202
203 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
204                                         struct amdgpu_irq_src *src,
205                                         unsigned type,
206                                         enum amdgpu_interrupt_state state)
207 {
208         struct amdgpu_vmhub *hub;
209         u32 tmp, reg, bits, i, j;
210
211         bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
212                 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
213                 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
214                 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
215                 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
216                 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
217                 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
218
219         switch (state) {
220         case AMDGPU_IRQ_STATE_DISABLE:
221                 for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
222                         hub = &adev->vmhub[j];
223                         for (i = 0; i < 16; i++) {
224                                 reg = hub->vm_context0_cntl + i;
225                                 tmp = RREG32(reg);
226                                 tmp &= ~bits;
227                                 WREG32(reg, tmp);
228                         }
229                 }
230                 break;
231         case AMDGPU_IRQ_STATE_ENABLE:
232                 for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
233                         hub = &adev->vmhub[j];
234                         for (i = 0; i < 16; i++) {
235                                 reg = hub->vm_context0_cntl + i;
236                                 tmp = RREG32(reg);
237                                 tmp |= bits;
238                                 WREG32(reg, tmp);
239                         }
240                 }
241         default:
242                 break;
243         }
244
245         return 0;
246 }
247
/*
 * gmc_v9_0_process_interrupt - handle a VM protection-fault interrupt
 *
 * Decodes the faulting page address from the IV entry, snapshots (and
 * clears) the L2 protection-fault status on bare metal, and logs the
 * fault rate-limited.  Always returns 0.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	/* vmid_src selects which hub (gfxhub vs mmhub) raised the fault */
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	/* Reassemble the page address: src_data[0] carries bits 43:12,
	 * the low nibble of src_data[1] carries bits 47:44.
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/* Read the status first, then write bit 0 to clear it. */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
280
/* IRQ source callbacks for the VM fault interrupt. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
285
286 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
287 {
288         adev->mc.vm_fault.num_types = 1;
289         adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
290 }
291
292 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
293 {
294         u32 req = 0;
295
296         /* invalidate using legacy mode on vmid*/
297         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
298                             PER_VMID_INVALIDATE_REQ, 1 << vmid);
299         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
300         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
301         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
302         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
303         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
304         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
305         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
306                             CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
307
308         return req;
309 }
310
311 /*
312  * GART
313  * VMID 0 is the physical GPU addresses as used by the kernel.
314  * VMIDs 1-15 are used for userspace clients and are handled
315  * by the amdgpu vm/hsa code.
316  */
317
318 /**
319  * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
320  *
321  * @adev: amdgpu_device pointer
322  * @vmid: vm instance to flush
323  *
324  * Flush the TLB for the requested page table.
325  */
/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table on every VM hub.
 * Writes the invalidate request to engine 17 of each hub, then waits
 * for the per-vmid ACK bit — first with a short busy-wait, then with
 * a slower udelay()-paced poll up to adev->usec_timeout.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	adev->nbio_funcs->hdp_flush(adev);

	/* Serialize invalidation requests across all users of the engines. */
	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;	/* ACK seen, next hub */

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;	/* ACK seen, next hub */

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
371
372 /**
373  * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
374  *
375  * @adev: amdgpu_device pointer
376  * @cpu_pt_addr: cpu address of the page table
377  * @gpu_page_idx: entry in the page table to update
378  * @addr: dst addr to write into pte/pde
379  * @flags: access flags
380  *
381  * Update the page tables using the CPU.
382  */
383 static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
384                                         void *cpu_pt_addr,
385                                         uint32_t gpu_page_idx,
386                                         uint64_t addr,
387                                         uint64_t flags)
388 {
389         void __iomem *ptr = (void *)cpu_pt_addr;
390         uint64_t value;
391
392         /*
393          * PTE format on VEGA 10:
394          * 63:59 reserved
395          * 58:57 mtype
396          * 56 F
397          * 55 L
398          * 54 P
399          * 53 SW
400          * 52 T
401          * 50:48 reserved
402          * 47:12 4k physical page base address
403          * 11:7 fragment
404          * 6 write
405          * 5 read
406          * 4 exe
407          * 3 Z
408          * 2 snooped
409          * 1 system
410          * 0 valid
411          *
412          * PDE format on VEGA 10:
413          * 63:59 block fragment size
414          * 58:55 reserved
415          * 54 P
416          * 53:48 reserved
417          * 47:6 physical base address of PD or PTE
418          * 5:3 reserved
419          * 2 C
420          * 1 system
421          * 0 valid
422          */
423
424         /*
425          * The following is for PTE only. GART does not have PDEs.
426         */
427         value = addr & 0x0000FFFFFFFFF000ULL;
428         value |= flags;
429         writeq(value, ptr + (gpu_page_idx * 8));
430         return 0;
431 }
432
433 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
434                                                 uint32_t flags)
435
436 {
437         uint64_t pte_flag = 0;
438
439         if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
440                 pte_flag |= AMDGPU_PTE_EXECUTABLE;
441         if (flags & AMDGPU_VM_PAGE_READABLE)
442                 pte_flag |= AMDGPU_PTE_READABLE;
443         if (flags & AMDGPU_VM_PAGE_WRITEABLE)
444                 pte_flag |= AMDGPU_PTE_WRITEABLE;
445
446         switch (flags & AMDGPU_VM_MTYPE_MASK) {
447         case AMDGPU_VM_MTYPE_DEFAULT:
448                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
449                 break;
450         case AMDGPU_VM_MTYPE_NC:
451                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
452                 break;
453         case AMDGPU_VM_MTYPE_WC:
454                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
455                 break;
456         case AMDGPU_VM_MTYPE_CC:
457                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
458                 break;
459         case AMDGPU_VM_MTYPE_UC:
460                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
461                 break;
462         default:
463                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
464                 break;
465         }
466
467         if (flags & AMDGPU_VM_PAGE_PRT)
468                 pte_flag |= AMDGPU_PTE_PRT;
469
470         return pte_flag;
471 }
472
/*
 * gmc_v9_0_get_vm_pde - adjust a PDE address/flags for the hardware
 *
 * Translates the address to an MC address (unless the entry is a huge-page
 * PTE) and, when 5-level translation is enabled, applies the per-level
 * fragment/translate-further flags.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* Huge-page PTEs already carry a system address; only real PDEs
	 * are rebased from VRAM start to the MC address space.
	 */
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->mc.vram_start;
	/* Address must be 64-byte aligned and within 48 bits. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->mc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* PDB0: a PTE-style entry loses the PTE flag; a normal PDE
		 * gets "translate further" so the walk continues.
		 */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
496
/* GART callbacks exported to the core amdgpu GMC layer. */
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
504
505 static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
506 {
507         if (adev->gart.gart_funcs == NULL)
508                 adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
509 }
510
511 static int gmc_v9_0_early_init(void *handle)
512 {
513         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
514
515         gmc_v9_0_set_gart_funcs(adev);
516         gmc_v9_0_set_irq_funcs(adev);
517
518         adev->mc.shared_aperture_start = 0x2000000000000000ULL;
519         adev->mc.shared_aperture_end =
520                 adev->mc.shared_aperture_start + (4ULL << 30) - 1;
521         adev->mc.private_aperture_start =
522                 adev->mc.shared_aperture_end + 1;
523         adev->mc.private_aperture_end =
524                 adev->mc.private_aperture_start + (4ULL << 30) - 1;
525
526         return 0;
527 }
528
/*
 * gmc_v9_0_ecc_available - probe whether ECC is enabled on all UMC channels
 *
 * Walks all 16 UMC channels and counts ("lost_sheep") every channel where
 * ECC is fused off, DRAM is not ready, or write/read ECC is disabled.
 * Returns 1 when every check passes (ECC usable), 0 otherwise.
 */
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;	/* number of failed per-channel checks */

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	/* UmcLocalCap: ECC must not be fused off on any channel. */
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	/* UMC_CONFIG: DRAM must report ready on every channel. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	/* EccCtrl: both write and read ECC must be enabled. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}
607
/*
 * gmc_v9_0_late_init - late IP-block init
 *
 * Assigns a VM invalidation engine to every ring (per hub), probes ECC on
 * Vega10, and finally enables the VM fault interrupt.  Returns a negative
 * error code from the ECC probe or amdgpu_irq_get() on failure.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	/* Next free engine per hub; ring allocation starts at engine 4. */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10) {
		/* gmc_v9_0_ecc_available() returns 1 (ECC on), 0 (ECC off),
		 * or a negative errno on failure.
		 */
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
652
/*
 * gmc_v9_0_vram_gtt_location - place VRAM and GART in the MC address space
 *
 * On bare metal the framebuffer base comes from the MMHUB; under SR-IOV it
 * stays 0.  NOTE(review): the VRAM call uses &adev->mc while the GART call
 * uses the @mc parameter — presumably the same object at every call site,
 * but worth confirming/unifying.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_device_vram_location(adev, &adev->mc, base);
	amdgpu_device_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}
667
668 /**
669  * gmc_v9_0_mc_init - initialize the memory controller driver params
670  *
671  * @adev: amdgpu_device pointer
672  *
673  * Look up the amount of vram, vram width, and decide how to place
674  * vram and gart within the GPU's physical address space.
675  * Returns 0 for success.
676  */
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	/* Prefer the VRAM width from the VBIOS; fall back to deriving it
	 * from the DF channel-interleave configuration.
	 */
	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		/* Map the IntLvNumChan encoding to a channel count.
		 * NOTE(review): encodings 2/4/6 yield 0 channels here —
		 * presumably reserved values per the DF spec; confirm.
		 */
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->mc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;

	/* Dedicated GPUs may be able to resize BAR0 to cover all of VRAM. */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
761
762 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
763 {
764         int r;
765
766         if (adev->gart.robj) {
767                 WARN(1, "VEGA10 PCIE GART already initialized\n");
768                 return 0;
769         }
770         /* Initialize common gart structure */
771         r = amdgpu_gart_init(adev);
772         if (r)
773                 return r;
774         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
775         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
776                                  AMDGPU_PTE_EXECUTABLE;
777         return amdgpu_gart_table_vram_alloc(adev);
778 }
779
780 static int gmc_v9_0_sw_init(void *handle)
781 {
782         int r;
783         int dma_bits;
784         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
785
786         gfxhub_v1_0_init(adev);
787         mmhub_v1_0_init(adev);
788
789         spin_lock_init(&adev->mc.invalidate_lock);
790
791         switch (adev->asic_type) {
792         case CHIP_RAVEN:
793                 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
794                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
795                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
796                 } else {
797                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
798                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
799                         adev->mc.translate_further =
800                                 adev->vm_manager.num_level > 1;
801                 }
802                 break;
803         case CHIP_VEGA10:
804                 /* XXX Don't know how to get VRAM type yet. */
805                 adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
806                 /*
807                  * To fulfill 4-level page support,
808                  * vm size is 256TB (48bit), maximum size of Vega10,
809                  * block size 512 (9bit)
810                  */
811                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
812                 break;
813         default:
814                 break;
815         }
816
817         /* This interrupt is VMC page fault.*/
818         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
819                                 &adev->mc.vm_fault);
820         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
821                                 &adev->mc.vm_fault);
822
823         if (r)
824                 return r;
825
826         /* Set the internal MC address mask
827          * This is the max address of the GPU's
828          * internal address space.
829          */
830         adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
831
832         /*
833          * It needs to reserve 8M stolen memory for vega10
834          * TODO: Figure out how to avoid that...
835          */
836         adev->mc.stolen_size = 8 * 1024 * 1024;
837
838         /* set DMA mask + need_dma32 flags.
839          * PCIE - can handle 44-bits.
840          * IGP - can handle 44-bits
841          * PCI - dma32 for legacy pci gart, 44 bits on vega10
842          */
843         adev->need_dma32 = false;
844         dma_bits = adev->need_dma32 ? 32 : 44;
845         r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
846         if (r) {
847                 adev->need_dma32 = true;
848                 dma_bits = 32;
849                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
850         }
851         r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
852         if (r) {
853                 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
854                 printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
855         }
856         adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
857
858         r = gmc_v9_0_mc_init(adev);
859         if (r)
860                 return r;
861
862         /* Memory manager */
863         r = amdgpu_bo_init(adev);
864         if (r)
865                 return r;
866
867         r = gmc_v9_0_gart_init(adev);
868         if (r)
869                 return r;
870
871         /*
872          * number of VMs
873          * VMID 0 is reserved for System
874          * amdgpu graphics/compute will use VMIDs 1-7
875          * amdkfd will use VMIDs 8-15
876          */
877         adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
878         adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
879
880         amdgpu_vm_manager_init(adev);
881
882         return 0;
883 }
884
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup: frees the GART page table BO
 * in VRAM, then releases the common GART bookkeeping.
 * (Previous comment said "CIK" — stale copy/paste; this is GMC v9.)
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
897
/* Software fini callback: undo sw_init in reverse order — release GEM
 * objects first, then the VM manager, the GART, and the BO backend.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
909
/* Program per-ASIC "golden" register defaults for the memory hub blocks. */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* Vega10 gets both MMHUB and ATHUB golden settings. */
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		/* Raven only carries ATHUB golden settings. */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		/* Other ASICs: no golden settings to apply here. */
		break;
	}
}
931
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Pins the GART page table in VRAM, programs the GFX and MM hubs to
 * use it, configures HDP and flushes the VMID-0 TLB.  Returns 0 on
 * success, negative error code on failure.  The call order below is
 * a fixed hardware programming sequence; do not reorder.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	/* Apply golden HDP settings before touching the hubs. */
	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	/* gart_init() must have created the page table BO first. */
	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* Raven: set up MMHUB powergating before enabling the hubs. */
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	/* Point both hubs' VMID 0 at the GART page table. */
	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* NOTE(review): value is written back unchanged — presumably the
	 * write itself (re)latches the host path config; confirm against
	 * the HDP register documentation.
	 */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev);

	/* fault_enable_default = false only when faults must always stop. */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	/* Flush VMID 0 so the hubs pick up the freshly-programmed table. */
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
995
996 static int gmc_v9_0_hw_init(void *handle)
997 {
998         int r;
999         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1000
1001         /* The sequence of these two function calls matters.*/
1002         gmc_v9_0_init_golden_registers(adev);
1003
1004         if (adev->mode_info.num_crtc) {
1005                 /* Lockout access through VGA aperture*/
1006                 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1007
1008                 /* disable VGA render */
1009                 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1010         }
1011
1012         r = gmc_v9_0_gart_enable(adev);
1013
1014         return r;
1015 }
1016
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * Disables VM page table walks in both the GFX and MM hubs, then
 * unpins the GART page table from VRAM.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
1030
/* Hardware fini callback: drop the VM fault interrupt and disable the
 * GART.  Skipped entirely under SR-IOV, where the host owns the GMC
 * registers.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* Stop fault interrupts before turning off the page table walkers. */
	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1046
/* Suspend callback: for GMC v9 suspending is identical to hw teardown. */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1053
/* Resume callback: redo full hw init, then reset all VMIDs afterwards
 * (same ordering as before: init must complete before the VMID reset).
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1067
/* Idle query callback: the GMC v9 block exposes no busy state, so it is
 * unconditionally reported as idle.
 */
static bool gmc_v9_0_is_idle(void *handle)
{
	return true;
}
1073
/* Wait-for-idle callback: nothing to poll on GMC v9, succeed at once. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	return 0;
}
1079
/* Soft-reset callback: placeholder (marked "XXX for emulation" in the
 * original); performs no reset and reports success.
 */
static int gmc_v9_0_soft_reset(void *handle)
{
	return 0;
}
1085
1086 static int gmc_v9_0_set_clockgating_state(void *handle,
1087                                         enum amd_clockgating_state state)
1088 {
1089         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1090
1091         return mmhub_v1_0_set_clockgating(adev, state);
1092 }
1093
1094 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1095 {
1096         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1097
1098         mmhub_v1_0_get_clockgating(adev, flags);
1099 }
1100
/* Powergating set callback: intentionally a no-op — no powergating
 * control is implemented for GMC v9 here.
 */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1106
/* Dispatch table wiring the GMC v9 lifecycle callbacks into the amd
 * IP-block framework (see struct amd_ip_funcs for callback semantics).
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1124
/* IP block registration record: identifies this driver as GMC v9.0 and
 * points the core at the callback table above.
 */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};