drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <drm/drm_cache.h>
25 #include "amdgpu.h"
26 #include "gmc_v9_0.h"
27 #include "amdgpu_atomfirmware.h"
28 #include "amdgpu_gem.h"
29
30 #include "hdp/hdp_4_0_offset.h"
31 #include "hdp/hdp_4_0_sh_mask.h"
32 #include "gc/gc_9_0_sh_mask.h"
33 #include "dce/dce_12_0_offset.h"
34 #include "dce/dce_12_0_sh_mask.h"
35 #include "vega10_enum.h"
36 #include "mmhub/mmhub_1_0_offset.h"
37 #include "athub/athub_1_0_offset.h"
38 #include "oss/osssys_4_0_offset.h"
39
40 #include "soc15.h"
41 #include "soc15_common.h"
42 #include "umc/umc_6_0_sh_mask.h"
43
44 #include "gfxhub_v1_0.h"
45 #include "mmhub_v1_0.h"
46 #include "gfxhub_v1_1.h"
47
48 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
49
50 /* add these here since we already include dce12 headers and these are for DCN */
51 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
52 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
53 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
54 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
55 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
56 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
57
58 /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
59 #define AMDGPU_NUM_OF_VMIDS                     8
60
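/*
 * Editor's note: each triplet below is { register offset, AND mask, OR value };
 * amdgpu_device_program_register_sequence() (called from gmc_v9_0_gart_enable())
 * clears the bits selected by the AND mask and then ORs in the value.
 */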
61 static const u32 golden_settings_vega10_hdp[] =
62 {
63         0xf64, 0x0fffffff, 0x00000000,
64         0xf65, 0x0fffffff, 0x00000000,
65         0xf66, 0x0fffffff, 0x00000000,
66         0xf67, 0x0fffffff, 0x00000000,
67         0xf68, 0x0fffffff, 0x00000000,
68         0xf6a, 0x0fffffff, 0x00000000,
69         0xf6b, 0x0fffffff, 0x00000000,
70         0xf6c, 0x0fffffff, 0x00000000,
71         0xf6d, 0x0fffffff, 0x00000000,
72         0xf6e, 0x0fffffff, 0x00000000,
73 };
74
75 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
76 {
77         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
78         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
79 };
80
81 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
82 {
83         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
84         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
85 };
86
87 /* ECC-related register addresses (BASE + reg offset) */
88 /* Universal Memory Controller caps (may be fused). */
89 /* UMCCH:UmcLocalCap */
90 #define UMCLOCALCAPS_ADDR0      (0x00014306 + 0x00000000)
91 #define UMCLOCALCAPS_ADDR1      (0x00014306 + 0x00000800)
92 #define UMCLOCALCAPS_ADDR2      (0x00014306 + 0x00001000)
93 #define UMCLOCALCAPS_ADDR3      (0x00014306 + 0x00001800)
94 #define UMCLOCALCAPS_ADDR4      (0x00054306 + 0x00000000)
95 #define UMCLOCALCAPS_ADDR5      (0x00054306 + 0x00000800)
96 #define UMCLOCALCAPS_ADDR6      (0x00054306 + 0x00001000)
97 #define UMCLOCALCAPS_ADDR7      (0x00054306 + 0x00001800)
98 #define UMCLOCALCAPS_ADDR8      (0x00094306 + 0x00000000)
99 #define UMCLOCALCAPS_ADDR9      (0x00094306 + 0x00000800)
100 #define UMCLOCALCAPS_ADDR10     (0x00094306 + 0x00001000)
101 #define UMCLOCALCAPS_ADDR11     (0x00094306 + 0x00001800)
102 #define UMCLOCALCAPS_ADDR12     (0x000d4306 + 0x00000000)
103 #define UMCLOCALCAPS_ADDR13     (0x000d4306 + 0x00000800)
104 #define UMCLOCALCAPS_ADDR14     (0x000d4306 + 0x00001000)
105 #define UMCLOCALCAPS_ADDR15     (0x000d4306 + 0x00001800)
106
107 /* Universal Memory Controller Channel config. */
108 /* UMCCH:UMC_CONFIG */
109 #define UMCCH_UMC_CONFIG_ADDR0  (0x00014040 + 0x00000000)
110 #define UMCCH_UMC_CONFIG_ADDR1  (0x00014040 + 0x00000800)
111 #define UMCCH_UMC_CONFIG_ADDR2  (0x00014040 + 0x00001000)
112 #define UMCCH_UMC_CONFIG_ADDR3  (0x00014040 + 0x00001800)
113 #define UMCCH_UMC_CONFIG_ADDR4  (0x00054040 + 0x00000000)
114 #define UMCCH_UMC_CONFIG_ADDR5  (0x00054040 + 0x00000800)
115 #define UMCCH_UMC_CONFIG_ADDR6  (0x00054040 + 0x00001000)
116 #define UMCCH_UMC_CONFIG_ADDR7  (0x00054040 + 0x00001800)
117 #define UMCCH_UMC_CONFIG_ADDR8  (0x00094040 + 0x00000000)
118 #define UMCCH_UMC_CONFIG_ADDR9  (0x00094040 + 0x00000800)
119 #define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
120 #define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
121 #define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
122 #define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
123 #define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
124 #define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
125
126 /* Universal Memory Controller Channel Ecc config. */
127 /* UMCCH:EccCtrl */
128 #define UMCCH_ECCCTRL_ADDR0     (0x00014053 + 0x00000000)
129 #define UMCCH_ECCCTRL_ADDR1     (0x00014053 + 0x00000800)
130 #define UMCCH_ECCCTRL_ADDR2     (0x00014053 + 0x00001000)
131 #define UMCCH_ECCCTRL_ADDR3     (0x00014053 + 0x00001800)
132 #define UMCCH_ECCCTRL_ADDR4     (0x00054053 + 0x00000000)
133 #define UMCCH_ECCCTRL_ADDR5     (0x00054053 + 0x00000800)
134 #define UMCCH_ECCCTRL_ADDR6     (0x00054053 + 0x00001000)
135 #define UMCCH_ECCCTRL_ADDR7     (0x00054053 + 0x00001800)
136 #define UMCCH_ECCCTRL_ADDR8     (0x00094053 + 0x00000000)
137 #define UMCCH_ECCCTRL_ADDR9     (0x00094053 + 0x00000800)
138 #define UMCCH_ECCCTRL_ADDR10    (0x00094053 + 0x00001000)
139 #define UMCCH_ECCCTRL_ADDR11    (0x00094053 + 0x00001800)
140 #define UMCCH_ECCCTRL_ADDR12    (0x000d4053 + 0x00000000)
141 #define UMCCH_ECCCTRL_ADDR13    (0x000d4053 + 0x00000800)
142 #define UMCCH_ECCCTRL_ADDR14    (0x000d4053 + 0x00001000)
143 #define UMCCH_ECCCTRL_ADDR15    (0x000d4053 + 0x00001800)
144
145 static const uint32_t ecc_umclocalcap_addrs[] = {
146         UMCLOCALCAPS_ADDR0,
147         UMCLOCALCAPS_ADDR1,
148         UMCLOCALCAPS_ADDR2,
149         UMCLOCALCAPS_ADDR3,
150         UMCLOCALCAPS_ADDR4,
151         UMCLOCALCAPS_ADDR5,
152         UMCLOCALCAPS_ADDR6,
153         UMCLOCALCAPS_ADDR7,
154         UMCLOCALCAPS_ADDR8,
155         UMCLOCALCAPS_ADDR9,
156         UMCLOCALCAPS_ADDR10,
157         UMCLOCALCAPS_ADDR11,
158         UMCLOCALCAPS_ADDR12,
159         UMCLOCALCAPS_ADDR13,
160         UMCLOCALCAPS_ADDR14,
161         UMCLOCALCAPS_ADDR15,
162 };
163
164 static const uint32_t ecc_umcch_umc_config_addrs[] = {
165         UMCCH_UMC_CONFIG_ADDR0,
166         UMCCH_UMC_CONFIG_ADDR1,
167         UMCCH_UMC_CONFIG_ADDR2,
168         UMCCH_UMC_CONFIG_ADDR3,
169         UMCCH_UMC_CONFIG_ADDR4,
170         UMCCH_UMC_CONFIG_ADDR5,
171         UMCCH_UMC_CONFIG_ADDR6,
172         UMCCH_UMC_CONFIG_ADDR7,
173         UMCCH_UMC_CONFIG_ADDR8,
174         UMCCH_UMC_CONFIG_ADDR9,
175         UMCCH_UMC_CONFIG_ADDR10,
176         UMCCH_UMC_CONFIG_ADDR11,
177         UMCCH_UMC_CONFIG_ADDR12,
178         UMCCH_UMC_CONFIG_ADDR13,
179         UMCCH_UMC_CONFIG_ADDR14,
180         UMCCH_UMC_CONFIG_ADDR15,
181 };
182
183 static const uint32_t ecc_umcch_eccctrl_addrs[] = {
184         UMCCH_ECCCTRL_ADDR0,
185         UMCCH_ECCCTRL_ADDR1,
186         UMCCH_ECCCTRL_ADDR2,
187         UMCCH_ECCCTRL_ADDR3,
188         UMCCH_ECCCTRL_ADDR4,
189         UMCCH_ECCCTRL_ADDR5,
190         UMCCH_ECCCTRL_ADDR6,
191         UMCCH_ECCCTRL_ADDR7,
192         UMCCH_ECCCTRL_ADDR8,
193         UMCCH_ECCCTRL_ADDR9,
194         UMCCH_ECCCTRL_ADDR10,
195         UMCCH_ECCCTRL_ADDR11,
196         UMCCH_ECCCTRL_ADDR12,
197         UMCCH_ECCCTRL_ADDR13,
198         UMCCH_ECCCTRL_ADDR14,
199         UMCCH_ECCCTRL_ADDR15,
200 };
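/*
 * Editor's note: the 16 addresses in each table above follow a regular
 * pattern: four UMC instances at a stride of 0x40000, each with four
 * channels at a stride of 0x800.  A minimal sketch of generating such a
 * table at runtime (illustrative only; the helper below is not part of
 * the driver):
 */
#if 0
static void gmc_v9_0_build_umc_addrs(uint32_t base, uint32_t addrs[16])
{
	unsigned int inst, ch;

	for (inst = 0; inst < 4; inst++)
		for (ch = 0; ch < 4; ch++)
			addrs[inst * 4 + ch] = base + inst * 0x40000 + ch * 0x800;
}
/* e.g. gmc_v9_0_build_umc_addrs(0x00014306, local_caps); */
#endif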
201
202 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
203                                         struct amdgpu_irq_src *src,
204                                         unsigned type,
205                                         enum amdgpu_interrupt_state state)
206 {
207         struct amdgpu_vmhub *hub;
208         u32 tmp, reg, bits, i, j;
209
210         bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
211                 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
212                 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
213                 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
214                 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
215                 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
216                 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
217
218         switch (state) {
219         case AMDGPU_IRQ_STATE_DISABLE:
220                 for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
221                         hub = &adev->vmhub[j];
222                         for (i = 0; i < 16; i++) {
223                                 reg = hub->vm_context0_cntl + i;
224                                 tmp = RREG32(reg);
225                                 tmp &= ~bits;
226                                 WREG32(reg, tmp);
227                         }
228                 }
229                 break;
230         case AMDGPU_IRQ_STATE_ENABLE:
231                 for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
232                         hub = &adev->vmhub[j];
233                         for (i = 0; i < 16; i++) {
234                                 reg = hub->vm_context0_cntl + i;
235                                 tmp = RREG32(reg);
236                                 tmp |= bits;
237                                 WREG32(reg, tmp);
238                         }
239                 }
240         default:
241                 break;
242         }
243
244         return 0;
245 }
246
247 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
248                                 struct amdgpu_irq_src *source,
249                                 struct amdgpu_iv_entry *entry)
250 {
251         struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
252         uint32_t status = 0;
253         u64 addr;
254
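        /* The IV encodes the faulting page: src_data[0] carries address bits
         * 43:12 and the low nibble of src_data[1] carries bits 47:44.
         */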
255         addr = (u64)entry->src_data[0] << 12;
256         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
257
258         if (!amdgpu_sriov_vf(adev)) {
259                 status = RREG32(hub->vm_l2_pro_fault_status);
260                 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
261         }
262
263         if (printk_ratelimit()) {
264                 struct amdgpu_task_info task_info = { 0 };
265
266                 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
267
268                 dev_err(adev->dev,
269                         "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
270                         entry->vmid_src ? "mmhub" : "gfxhub",
271                         entry->src_id, entry->ring_id, entry->vmid,
272                         entry->pasid, task_info.process_name, task_info.tgid,
273                         task_info.task_name, task_info.pid);
274                 dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
275                         addr, entry->client_id);
276                 if (!amdgpu_sriov_vf(adev))
277                         dev_err(adev->dev,
278                                 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
279                                 status);
280         }
281
282         return 0;
283 }
284
285 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
286         .set = gmc_v9_0_vm_fault_interrupt_state,
287         .process = gmc_v9_0_process_interrupt,
288 };
289
290 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
291 {
292         adev->gmc.vm_fault.num_types = 1;
293         adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
294 }
295
296 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
297                                         uint32_t flush_type)
298 {
299         u32 req = 0;
300
301         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
302                             PER_VMID_INVALIDATE_REQ, 1 << vmid);
303         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
304         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
305         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
306         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
307         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
308         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
309         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
310                             CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
311
312         return req;
313 }
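/*
 * The request word built above selects which VMIDs to invalidate (one bit per
 * VMID), the flush type, and which levels to drop (L1/L2 PTEs and the three
 * PDE levels).  It is written to a VM_INVALIDATE_ENG*_REQ register, either
 * directly or as a ring packet, and the matching ACK register is polled until
 * the per-VMID bit comes back.
 */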
314
315 /*
316  * GART
317  * VMID 0 is the physical GPU addresses as used by the kernel.
318  * VMIDs 1-15 are used for userspace clients and are handled
319  * by the amdgpu vm/hsa code.
320  */
321
322 /**
323  * gmc_v9_0_flush_gpu_tlb - flush the TLB for a given VMID and flush type
324  *
325  * @adev: amdgpu_device pointer
326  * @vmid: vm instance to flush
327  * @flush_type: the flush type
328  *
329  * Flush the TLB for the requested page table using the given flush type.
330  */
331 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
332                                 uint32_t vmid, uint32_t flush_type)
333 {
334         const unsigned eng = 17;
335         unsigned i, j;
336
337         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
338                 struct amdgpu_vmhub *hub = &adev->vmhub[i];
339                 u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
340
341                 /* This is necessary for a HW workaround under SRIOV as well
342                  * as GFXOFF under bare metal
343                  */
344                 if (adev->gfx.kiq.ring.sched.ready &&
345                     (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
346                     !adev->in_gpu_reset) {
347                         uint32_t req = hub->vm_inv_eng0_req + eng;
348                         uint32_t ack = hub->vm_inv_eng0_ack + eng;
349
350                         amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
351                                                            1 << vmid);
352                         continue;
353                 }
354
355                 spin_lock(&adev->gmc.invalidate_lock);
356                 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
357                 for (j = 0; j < adev->usec_timeout; j++) {
358                         tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
359                         if (tmp & (1 << vmid))
360                                 break;
361                         udelay(1);
362                 }
363                 spin_unlock(&adev->gmc.invalidate_lock);
364                 if (j < adev->usec_timeout)
365                         continue;
366
367                 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
368         }
369 }
370
371 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
372                                             unsigned vmid, uint64_t pd_addr)
373 {
374         struct amdgpu_device *adev = ring->adev;
375         struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
376         uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
377         unsigned eng = ring->vm_inv_eng;
378
379         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
380                               lower_32_bits(pd_addr));
381
382         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
383                               upper_32_bits(pd_addr));
384
385         amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
386                                             hub->vm_inv_eng0_ack + eng,
387                                             req, 1 << vmid);
388
389         return pd_addr;
390 }
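/*
 * Unlike gmc_v9_0_flush_gpu_tlb() above, which performs the invalidation
 * through register writes from the CPU, this variant emits the page-directory
 * update and the invalidation request as ring packets, so the flush executes
 * in order with the rest of the command submission.
 */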
391
392 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
393                                         unsigned pasid)
394 {
395         struct amdgpu_device *adev = ring->adev;
396         uint32_t reg;
397
398         if (ring->funcs->vmhub == AMDGPU_GFXHUB)
399                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
400         else
401                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
402
403         amdgpu_ring_emit_wreg(ring, reg, pasid);
404 }
405
406 /**
407  * gmc_v9_0_set_pte_pde - update the page tables using MMIO
408  *
409  * @adev: amdgpu_device pointer
410  * @cpu_pt_addr: cpu address of the page table
411  * @gpu_page_idx: entry in the page table to update
412  * @addr: dst addr to write into pte/pde
413  * @flags: access flags
414  *
415  * Update the page tables using the CPU.
416  */
417 static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
418                                 uint32_t gpu_page_idx, uint64_t addr,
419                                 uint64_t flags)
420 {
421         void __iomem *ptr = (void *)cpu_pt_addr;
422         uint64_t value;
423
424         /*
425          * PTE format on VEGA 10:
426          * 63:59 reserved
427          * 58:57 mtype
428          * 56 F
429          * 55 L
430          * 54 P
431          * 53 SW
432          * 52 T
433          * 50:48 reserved
434          * 47:12 4k physical page base address
435          * 11:7 fragment
436          * 6 write
437          * 5 read
438          * 4 exe
439          * 3 Z
440          * 2 snooped
441          * 1 system
442          * 0 valid
443          *
444          * PDE format on VEGA 10:
445          * 63:59 block fragment size
446          * 58:55 reserved
447          * 54 P
448          * 53:48 reserved
449          * 47:6 physical base address of PD or PTE
450          * 5:3 reserved
451          * 2 C
452          * 1 system
453          * 0 valid
454          */
455
456         /*
457          * The following is for PTE only. GART does not have PDEs.
458          */
459         value = addr & 0x0000FFFFFFFFF000ULL;
460         value |= flags;
461         writeq(value, ptr + (gpu_page_idx * 8));
462         return 0;
463 }
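/*
 * Worked example (editor's sketch, address chosen for illustration): mapping a
 * system page at physical address 0x123456000 as a valid, readable and
 * writable GART entry yields
 *
 *	value = (0x123456000 & 0x0000FFFFFFFFF000ULL)
 *		| AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM
 *		| AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *
 * i.e. bits 0 (valid), 1 (system), 5 (read) and 6 (write) set on top of the
 * 4K page base address, matching the PTE layout documented above.
 */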
464
465 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
466                                                 uint32_t flags)
467
468 {
469         uint64_t pte_flag = 0;
470
471         if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
472                 pte_flag |= AMDGPU_PTE_EXECUTABLE;
473         if (flags & AMDGPU_VM_PAGE_READABLE)
474                 pte_flag |= AMDGPU_PTE_READABLE;
475         if (flags & AMDGPU_VM_PAGE_WRITEABLE)
476                 pte_flag |= AMDGPU_PTE_WRITEABLE;
477
478         switch (flags & AMDGPU_VM_MTYPE_MASK) {
479         case AMDGPU_VM_MTYPE_DEFAULT:
480                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
481                 break;
482         case AMDGPU_VM_MTYPE_NC:
483                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
484                 break;
485         case AMDGPU_VM_MTYPE_WC:
486                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
487                 break;
488         case AMDGPU_VM_MTYPE_CC:
489                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
490                 break;
491         case AMDGPU_VM_MTYPE_UC:
492                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
493                 break;
494         default:
495                 pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
496                 break;
497         }
498
499         if (flags & AMDGPU_VM_PAGE_PRT)
500                 pte_flag |= AMDGPU_PTE_PRT;
501
502         return pte_flag;
503 }
504
505 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
506                                 uint64_t *addr, uint64_t *flags)
507 {
508         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
509                 *addr = adev->vm_manager.vram_base_offset + *addr -
510                         adev->gmc.vram_start;
511         BUG_ON(*addr & 0xFFFF00000000003FULL);
512
513         if (!adev->gmc.translate_further)
514                 return;
515
516         if (level == AMDGPU_VM_PDB1) {
517                 /* Set the block fragment size */
518                 if (!(*flags & AMDGPU_PDE_PTE))
519                         *flags |= AMDGPU_PDE_BFS(0x9);
520
521         } else if (level == AMDGPU_VM_PDB0) {
522                 if (*flags & AMDGPU_PDE_PTE)
523                         *flags &= ~AMDGPU_PDE_PTE;
524                 else
525                         *flags |= AMDGPU_PTE_TF;
526         }
527 }
528
529 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
530         .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
531         .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
532         .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
533         .set_pte_pde = gmc_v9_0_set_pte_pde,
534         .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
535         .get_vm_pde = gmc_v9_0_get_vm_pde
536 };
537
538 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
539 {
540         adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
541 }
542
543 static int gmc_v9_0_early_init(void *handle)
544 {
545         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
546
547         gmc_v9_0_set_gmc_funcs(adev);
548         gmc_v9_0_set_irq_funcs(adev);
549
550         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
551         adev->gmc.shared_aperture_end =
552                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
553         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
554         adev->gmc.private_aperture_end =
555                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
556
557         return 0;
558 }
559
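/*
 * Walk all 16 UMC channels and check that ECC is not fused off
 * (UmcLocalCap:EccDis clear), that DRAM is initialized (UMC_CONFIG:DramReady
 * set), and that both write and read ECC are enabled (EccCtrl:WrEccEn/RdEccEn).
 * Returns 1 when every check passes, 0 otherwise.
 */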
560 static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
561 {
562         uint32_t reg_val;
563         uint32_t reg_addr;
564         uint32_t field_val;
565         size_t i;
566         uint32_t fv2;
567         size_t lost_sheep;
568
569         DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
570
571         lost_sheep = 0;
572         for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
573                 reg_addr = ecc_umclocalcap_addrs[i];
574                 DRM_DEBUG("ecc: "
575                           "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
576                           i, reg_addr);
577                 reg_val = RREG32(reg_addr);
578                 field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
579                                           EccDis);
580                 DRM_DEBUG("ecc: "
581                           "reg_val: 0x%08x, "
582                           "EccDis: 0x%08x, ",
583                           reg_val, field_val);
584                 if (field_val) {
585                         DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
586                         ++lost_sheep;
587                 }
588         }
589
590         for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
591                 reg_addr = ecc_umcch_umc_config_addrs[i];
592                 DRM_DEBUG("ecc: "
593                           "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
594                           i, reg_addr);
595                 reg_val = RREG32(reg_addr);
596                 field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
597                                           DramReady);
598                 DRM_DEBUG("ecc: "
599                           "reg_val: 0x%08x, "
600                           "DramReady: 0x%08x\n",
601                           reg_val, field_val);
602
603                 if (!field_val) {
604                         DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
605                         ++lost_sheep;
606                 }
607         }
608
609         for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
610                 reg_addr = ecc_umcch_eccctrl_addrs[i];
611                 DRM_DEBUG("ecc: "
612                           "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
613                           i, reg_addr);
614                 reg_val = RREG32(reg_addr);
615                 field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
616                                           WrEccEn);
617                 fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
618                                     RdEccEn);
619                 DRM_DEBUG("ecc: "
620                           "reg_val: 0x%08x, "
621                           "WrEccEn: 0x%08x, "
622                           "RdEccEn: 0x%08x\n",
623                           reg_val, field_val, fv2);
624
625                 if (!field_val) {
626                         DRM_DEBUG("ecc: WrEccEn is not set\n");
627                         ++lost_sheep;
628                 }
629                 if (!fv2) {
630                         DRM_DEBUG("ecc: RdEccEn is not set\n");
631                         ++lost_sheep;
632                 }
633         }
634
635         DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
636         return lost_sheep == 0;
637 }
638
639 static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
640 {
641
642         /*
643          * TODO:
644          * Currently there is a bug where some memory client outside
645          * of the driver writes to the first 8M of VRAM on S3 resume;
646          * this overwrites the GART, which by default is placed in the first 8M,
647          * and causes VM_FAULTS once the GTT is accessed.
648          * Keep the stolen memory reservation until this is resolved.
649          * Also check the related code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init.
650          */
651         switch (adev->asic_type) {
652         case CHIP_VEGA10:
653                 return true;
654         case CHIP_RAVEN:
655         case CHIP_VEGA12:
656         case CHIP_VEGA20:
657         default:
658                 return false;
659         }
660 }
661
662 static int gmc_v9_0_late_init(void *handle)
663 {
664         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
665         /*
666          * The latest engine allocation on gfx9 is:
667          * Engine 0, 1: idle
668          * Engine 2, 3: firmware
669          * Engine 4~13: amdgpu ring, subject to change when ring number changes
670          * Engine 14~15: idle
671          * Engine 16: kfd tlb invalidation
672          * Engine 17: Gart flushes
673          */
674         unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
675         unsigned i;
676         int r;
677
678         if (!gmc_v9_0_keep_stolen_memory(adev))
679                 amdgpu_bo_late_init(adev);
680
681         for (i = 0; i < adev->num_rings; ++i) {
682                 struct amdgpu_ring *ring = adev->rings[i];
683                 unsigned vmhub = ring->funcs->vmhub;
684
685                 ring->vm_inv_eng = vm_inv_eng[vmhub]++;
686                 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
687                          ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
688         }
689
690         /* Engine 16 is used for KFD and 17 for GART flushes */
691         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
692                 BUG_ON(vm_inv_eng[i] > 16);
693
694         if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
695                 r = gmc_v9_0_ecc_available(adev);
696                 if (r == 1) {
697                         DRM_INFO("ECC is active.\n");
698                 } else if (r == 0) {
699                         DRM_INFO("ECC is not present.\n");
700                         adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
701                 } else {
702                         DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
703                         return r;
704                 }
705         }
706
707         return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
708 }
709
710 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
711                                         struct amdgpu_gmc *mc)
712 {
713         u64 base = 0;
714         if (!amdgpu_sriov_vf(adev))
715                 base = mmhub_v1_0_get_fb_location(adev);
716         /* add the xgmi offset of the physical node */
717         base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
718         amdgpu_gmc_vram_location(adev, &adev->gmc, base);
719         amdgpu_gmc_gart_location(adev, mc);
720         if (!amdgpu_sriov_vf(adev))
721                 amdgpu_gmc_agp_location(adev, mc);
722         /* base offset of vram pages */
723         adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
724
725         /* XXX: add the xgmi offset of the physical node? */
726         adev->vm_manager.vram_base_offset +=
727                 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
728 }
729
730 /**
731  * gmc_v9_0_mc_init - initialize the memory controller driver params
732  *
733  * @adev: amdgpu_device pointer
734  *
735  * Look up the amount of vram, vram width, and decide how to place
736  * vram and gart within the GPU's physical address space.
737  * Returns 0 for success.
738  */
739 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
740 {
741         int chansize, numchan;
742         int r;
743
744         if (amdgpu_emu_mode != 1)
745                 adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
746         if (!adev->gmc.vram_width) {
747                 /* hbm memory channel size */
748                 if (adev->flags & AMD_IS_APU)
749                         chansize = 64;
750                 else
751                         chansize = 128;
752
753                 numchan = adev->df_funcs->get_hbm_channel_number(adev);
754                 adev->gmc.vram_width = numchan * chansize;
755         }
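        /*
         * For example (editor's note, channel counts assumed for illustration):
         * a dGPU with 16 HBM channels reports 16 * 128 = 2048 bits, while an
         * APU with two channels reports 2 * 64 = 128 bits.
         */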
756
757         /* the memsize register reports VRAM size in MB; convert to bytes */
758         adev->gmc.mc_vram_size =
759                 adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
760         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
761
762         if (!(adev->flags & AMD_IS_APU)) {
763                 r = amdgpu_device_resize_fb_bar(adev);
764                 if (r)
765                         return r;
766         }
767         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
768         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
769
770 #ifdef CONFIG_X86_64
771         if (adev->flags & AMD_IS_APU) {
772                 adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
773                 adev->gmc.aper_size = adev->gmc.real_vram_size;
774         }
775 #endif
776         /* In case the PCI BAR is larger than the actual amount of vram */
777         adev->gmc.visible_vram_size = adev->gmc.aper_size;
778         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
779                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
780
781         /* set the gart size */
782         if (amdgpu_gart_size == -1) {
783                 switch (adev->asic_type) {
784                 case CHIP_VEGA10:  /* all engines support GPUVM */
785                 case CHIP_VEGA12:  /* all engines support GPUVM */
786                 case CHIP_VEGA20:
787                 default:
788                         adev->gmc.gart_size = 512ULL << 20;
789                         break;
790                 case CHIP_RAVEN:   /* DCE SG support */
791                         adev->gmc.gart_size = 1024ULL << 20;
792                         break;
793                 }
794         } else {
795                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
796         }
797
798         gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
799
800         return 0;
801 }
802
803 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
804 {
805         int r;
806
807         if (adev->gart.bo) {
808                 WARN(1, "VEGA10 PCIE GART already initialized\n");
809                 return 0;
810         }
811         /* Initialize common gart structure */
812         r = amdgpu_gart_init(adev);
813         if (r)
814                 return r;
815         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
816         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
817                                  AMDGPU_PTE_EXECUTABLE;
818         return amdgpu_gart_table_vram_alloc(adev);
819 }
820
821 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
822 {
823         u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
824         unsigned size;
825
826         /*
827          * TODO Remove once GART corruption is resolved
828          * Check related code in gmc_v9_0_sw_fini
829          */
830         if (gmc_v9_0_keep_stolen_memory(adev))
831                 return 9 * 1024 * 1024;
832
833         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
834                 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
835         } else {
836                 u32 viewport;
837
838                 switch (adev->asic_type) {
839                 case CHIP_RAVEN:
840                         viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
841                         size = (REG_GET_FIELD(viewport,
842                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
843                                 REG_GET_FIELD(viewport,
844                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
845                                 4);
846                         break;
847                 case CHIP_VEGA10:
848                 case CHIP_VEGA12:
849                 case CHIP_VEGA20:
850                 default:
851                         viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
852                         size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
853                                 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
854                                 4);
855                         break;
856                 }
857         }
858         /* return 0 if the pre-OS buffer uses up most of vram */
859         if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
860                 return 0;
861
862         return size;
863 }
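/*
 * Editor's note: the viewport-based estimate above assumes 4 bytes per pixel;
 * e.g. a 1920x1080 pre-OS framebuffer reserves 1920 * 1080 * 4 bytes, roughly
 * 8 MB, for the stolen region.
 */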
864
865 static int gmc_v9_0_sw_init(void *handle)
866 {
867         int r;
868         int dma_bits;
869         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
870
871         gfxhub_v1_0_init(adev);
872         mmhub_v1_0_init(adev);
873
874         spin_lock_init(&adev->gmc.invalidate_lock);
875
876         adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
877         switch (adev->asic_type) {
878         case CHIP_RAVEN:
879                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
880                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
881                 } else {
882                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
883                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
884                         adev->gmc.translate_further =
885                                 adev->vm_manager.num_level > 1;
886                 }
887                 break;
888         case CHIP_VEGA10:
889         case CHIP_VEGA12:
890         case CHIP_VEGA20:
891                 /*
892                  * To fulfill 4-level page support,
893                  * vm size is 256TB (48bit), maximum size of Vega10,
894                  * block size 512 (9bit)
895                  */
896                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
897                 break;
898         default:
899                 break;
900         }
901
902         /* These interrupts cover VMC and UTCL2 page faults. */
903         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
904                                 &adev->gmc.vm_fault);
905         if (!r)
906                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
907                                       UTCL2_1_0__SRCID__FAULT, &adev->gmc.vm_fault);
908         if (r)
909                 return r;
910
911         /* Set the internal MC address mask
912          * This is the max address of the GPU's
913          * internal address space.
914          */
915         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
916
917         /* set DMA mask + need_dma32 flags.
918          * PCIE - can handle 44-bits.
919          * IGP - can handle 44-bits
920          * PCI - dma32 for legacy pci gart, 44 bits on vega10
921          */
922         adev->need_dma32 = false;
923         dma_bits = adev->need_dma32 ? 32 : 44;
924         r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
925         if (r) {
926                 adev->need_dma32 = true;
927                 dma_bits = 32;
928                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
929         }
930         r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
931         if (r) {
932                 pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
933                 printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
934         }
935         adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
936
937         if (adev->gmc.xgmi.supported) {
938                 r = gfxhub_v1_1_get_xgmi_info(adev);
939                 if (r)
940                         return r;
941         }
942
943         r = gmc_v9_0_mc_init(adev);
944         if (r)
945                 return r;
946
947         adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
948
949         /* Memory manager */
950         r = amdgpu_bo_init(adev);
951         if (r)
952                 return r;
953
954         r = gmc_v9_0_gart_init(adev);
955         if (r)
956                 return r;
957
958         /*
959          * number of VMs
960          * VMID 0 is reserved for System
961          * amdgpu graphics/compute will use VMIDs 1-7
962          * amdkfd will use VMIDs 8-15
963          */
964         adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
965         adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
966
967         amdgpu_vm_manager_init(adev);
968
969         return 0;
970 }
971
972 static int gmc_v9_0_sw_fini(void *handle)
973 {
974         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
975
976         amdgpu_gem_force_release(adev);
977         amdgpu_vm_manager_fini(adev);
978
979         if (gmc_v9_0_keep_stolen_memory(adev))
980                 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
981
982         amdgpu_gart_table_vram_free(adev);
983         amdgpu_bo_fini(adev);
984         amdgpu_gart_fini(adev);
985
986         return 0;
987 }
988
989 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
990 {
991
992         switch (adev->asic_type) {
993         case CHIP_VEGA10:
994         case CHIP_VEGA20:
995                 soc15_program_register_sequence(adev,
996                                                 golden_settings_mmhub_1_0_0,
997                                                 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
998                 soc15_program_register_sequence(adev,
999                                                 golden_settings_athub_1_0_0,
1000                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1001                 break;
1002         case CHIP_VEGA12:
1003                 break;
1004         case CHIP_RAVEN:
1005                 soc15_program_register_sequence(adev,
1006                                                 golden_settings_athub_1_0_0,
1007                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1008                 break;
1009         default:
1010                 break;
1011         }
1012 }
1013
1014 /**
1015  * gmc_v9_0_gart_enable - gart enable
1016  *
1017  * @adev: amdgpu_device pointer
1018  */
1019 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1020 {
1021         int r;
1022         bool value;
1023         u32 tmp;
1024
1025         amdgpu_device_program_register_sequence(adev,
1026                                                 golden_settings_vega10_hdp,
1027                                                 ARRAY_SIZE(golden_settings_vega10_hdp));
1028
1029         if (adev->gart.bo == NULL) {
1030                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1031                 return -EINVAL;
1032         }
1033         r = amdgpu_gart_table_vram_pin(adev);
1034         if (r)
1035                 return r;
1036
1037         switch (adev->asic_type) {
1038         case CHIP_RAVEN:
1039                 mmhub_v1_0_update_power_gating(adev, true);
1040                 break;
1041         default:
1042                 break;
1043         }
1044
1045         r = gfxhub_v1_0_gart_enable(adev);
1046         if (r)
1047                 return r;
1048
1049         r = mmhub_v1_0_gart_enable(adev);
1050         if (r)
1051                 return r;
1052
1053         WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1054
1055         tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1056         WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1057
1058         /* After HDP is initialized, flush HDP.*/
1059         adev->nbio_funcs->hdp_flush(adev, NULL);
1060
1061         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1062                 value = false;
1063         else
1064                 value = true;
1065
1066         gfxhub_v1_0_set_fault_enable_default(adev, value);
1067         mmhub_v1_0_set_fault_enable_default(adev, value);
1068         gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
1069
1070         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1071                  (unsigned)(adev->gmc.gart_size >> 20),
1072                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1073         adev->gart.ready = true;
1074         return 0;
1075 }
1076
1077 static int gmc_v9_0_hw_init(void *handle)
1078 {
1079         int r;
1080         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1081
1082         /* The order matters: program the golden registers before enabling the GART. */
1083         gmc_v9_0_init_golden_registers(adev);
1084
1085         if (adev->mode_info.num_crtc) {
1086                 /* Lockout access through VGA aperture*/
1087                 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1088
1089                 /* disable VGA render */
1090                 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1091         }
1092
1093         r = gmc_v9_0_gart_enable(adev);
1094
1095         return r;
1096 }
1097
1098 /**
1099  * gmc_v9_0_gart_disable - gart disable
1100  *
1101  * @adev: amdgpu_device pointer
1102  *
1103  * This disables all VM page tables.
1104  */
1105 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1106 {
1107         gfxhub_v1_0_gart_disable(adev);
1108         mmhub_v1_0_gart_disable(adev);
1109         amdgpu_gart_table_vram_unpin(adev);
1110 }
1111
1112 static int gmc_v9_0_hw_fini(void *handle)
1113 {
1114         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1115
1116         if (amdgpu_sriov_vf(adev)) {
1117                 /* full access mode, so don't touch any GMC register */
1118                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1119                 return 0;
1120         }
1121
1122         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1123         gmc_v9_0_gart_disable(adev);
1124
1125         return 0;
1126 }
1127
1128 static int gmc_v9_0_suspend(void *handle)
1129 {
1130         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1131
1132         return gmc_v9_0_hw_fini(adev);
1133 }
1134
1135 static int gmc_v9_0_resume(void *handle)
1136 {
1137         int r;
1138         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1139
1140         r = gmc_v9_0_hw_init(adev);
1141         if (r)
1142                 return r;
1143
1144         amdgpu_vmid_reset_all(adev);
1145
1146         return 0;
1147 }
1148
1149 static bool gmc_v9_0_is_idle(void *handle)
1150 {
1151         /* MC is always ready in GMC v9.*/
1152         return true;
1153 }
1154
1155 static int gmc_v9_0_wait_for_idle(void *handle)
1156 {
1157         /* There is no need to wait for MC idle in GMC v9.*/
1158         return 0;
1159 }
1160
1161 static int gmc_v9_0_soft_reset(void *handle)
1162 {
1163         /* XXX for emulation.*/
1164         return 0;
1165 }
1166
1167 static int gmc_v9_0_set_clockgating_state(void *handle,
1168                                         enum amd_clockgating_state state)
1169 {
1170         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1171
1172         return mmhub_v1_0_set_clockgating(adev, state);
1173 }
1174
1175 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1176 {
1177         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1178
1179         mmhub_v1_0_get_clockgating(adev, flags);
1180 }
1181
1182 static int gmc_v9_0_set_powergating_state(void *handle,
1183                                         enum amd_powergating_state state)
1184 {
1185         return 0;
1186 }
1187
1188 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1189         .name = "gmc_v9_0",
1190         .early_init = gmc_v9_0_early_init,
1191         .late_init = gmc_v9_0_late_init,
1192         .sw_init = gmc_v9_0_sw_init,
1193         .sw_fini = gmc_v9_0_sw_fini,
1194         .hw_init = gmc_v9_0_hw_init,
1195         .hw_fini = gmc_v9_0_hw_fini,
1196         .suspend = gmc_v9_0_suspend,
1197         .resume = gmc_v9_0_resume,
1198         .is_idle = gmc_v9_0_is_idle,
1199         .wait_for_idle = gmc_v9_0_wait_for_idle,
1200         .soft_reset = gmc_v9_0_soft_reset,
1201         .set_clockgating_state = gmc_v9_0_set_clockgating_state,
1202         .set_powergating_state = gmc_v9_0_set_powergating_state,
1203         .get_clockgating_state = gmc_v9_0_get_clockgating_state,
1204 };
1205
1206 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1207 {
1208         .type = AMD_IP_BLOCK_TYPE_GMC,
1209         .major = 9,
1210         .minor = 0,
1211         .rev = 0,
1212         .funcs = &gmc_v9_0_ip_funcs,
1213 };