/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2

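/* UTCL2 client IDs used to decode GFX hub page faults (see gmc_v9_0_process_interrupt()) */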
static const char *gfxhub_client_ids[] = {
        "CB",
        "DB",
        "IA",
        "WD",
        "CPF",
        "CPC",
        "CPG",
        "RLC",
        "TCP",
        "SQC (inst)",
        "SQC (data)",
        "SQG",
        "PA",
};

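/*
 * MMHUB client IDs used to decode MM hub page faults, indexed by
 * [client_id][rw] from the VM_L2_PROTECTION_FAULT_STATUS CID and RW fields.
 */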
static const char *mmhub_client_ids_raven[][2] = {
        [0][0] = "MP1",
        [1][0] = "MP0",
        [2][0] = "VCN",
        [3][0] = "VCNU",
        [4][0] = "HDP",
        [5][0] = "DCE",
        [13][0] = "UTCL2",
        [19][0] = "TLS",
        [26][0] = "OSS",
        [27][0] = "SDMA0",
        [0][1] = "MP1",
        [1][1] = "MP0",
        [2][1] = "VCN",
        [3][1] = "VCNU",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [6][1] = "DBGU0",
        [7][1] = "DCE",
        [8][1] = "DCEDWB0",
        [9][1] = "DCEDWB1",
        [26][1] = "OSS",
        [27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
        [0][0] = "MP1",
        [1][0] = "MP0",
        [2][0] = "HDP",
        [4][0] = "DCEDMC",
        [5][0] = "DCEVGA",
        [13][0] = "UTCL2",
        [19][0] = "TLS",
        [26][0] = "OSS",
        [27][0] = "SDMA0",
        [28][0] = "VCN",
        [29][0] = "VCNU",
        [30][0] = "JPEG",
        [0][1] = "MP1",
        [1][1] = "MP0",
        [2][1] = "HDP",
        [3][1] = "XDP",
        [6][1] = "DBGU0",
        [7][1] = "DCEDMC",
        [8][1] = "DCEVGA",
        [9][1] = "DCEDWB",
        [26][1] = "OSS",
        [27][1] = "SDMA0",
        [28][1] = "VCN",
        [29][1] = "VCNU",
        [30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
        [0][0] = "MP0",
        [1][0] = "UVD",
        [2][0] = "UVDU",
        [3][0] = "HDP",
        [13][0] = "UTCL2",
        [14][0] = "OSS",
        [15][0] = "SDMA1",
        [32+0][0] = "VCE0",
        [32+1][0] = "VCE0U",
        [32+2][0] = "XDMA",
        [32+3][0] = "DCE",
        [32+4][0] = "MP1",
        [32+14][0] = "SDMA0",
        [0][1] = "MP0",
        [1][1] = "UVD",
        [2][1] = "UVDU",
        [3][1] = "DBGU0",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [14][1] = "OSS",
        [15][1] = "SDMA0",
        [32+0][1] = "VCE0",
        [32+1][1] = "VCE0U",
        [32+2][1] = "XDMA",
        [32+3][1] = "DCE",
        [32+4][1] = "DCEDWB",
        [32+5][1] = "MP1",
        [32+6][1] = "DBGU1",
        [32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
        [0][0] = "MP0",
        [1][0] = "VCE0",
        [2][0] = "VCE0U",
        [3][0] = "HDP",
        [13][0] = "UTCL2",
        [14][0] = "OSS",
        [15][0] = "SDMA1",
        [32+0][0] = "DCE",
        [32+1][0] = "XDMA",
        [32+2][0] = "UVD",
        [32+3][0] = "UVDU",
        [32+4][0] = "MP1",
        [32+15][0] = "SDMA0",
        [0][1] = "MP0",
        [1][1] = "VCE0",
        [2][1] = "VCE0U",
        [3][1] = "DBGU0",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [14][1] = "OSS",
        [15][1] = "SDMA0",
        [32+0][1] = "DCE",
        [32+1][1] = "DCEDWB",
        [32+2][1] = "XDMA",
        [32+3][1] = "UVD",
        [32+4][1] = "UVDU",
        [32+5][1] = "MP1",
        [32+6][1] = "DBGU1",
        [32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
        [0][0] = "XDMA",
        [1][0] = "DCE",
        [2][0] = "VCE0",
        [3][0] = "VCE0U",
        [4][0] = "UVD",
        [5][0] = "UVD1U",
        [13][0] = "OSS",
        [14][0] = "HDP",
        [15][0] = "SDMA0",
        [32+0][0] = "UVD",
        [32+1][0] = "UVDU",
        [32+2][0] = "MP1",
        [32+3][0] = "MP0",
        [32+12][0] = "UTCL2",
        [32+14][0] = "SDMA1",
        [0][1] = "XDMA",
        [1][1] = "DCE",
        [2][1] = "DCEDWB",
        [3][1] = "VCE0",
        [4][1] = "VCE0U",
        [5][1] = "UVD1",
        [6][1] = "UVD1U",
        [7][1] = "DBGU0",
        [8][1] = "XDP",
        [13][1] = "OSS",
        [14][1] = "HDP",
        [15][1] = "SDMA0",
        [32+0][1] = "UVD",
        [32+1][1] = "UVDU",
        [32+2][1] = "DBGU1",
        [32+3][1] = "MP1",
        [32+4][1] = "MP0",
        [32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
        [2][0] = "MP1",
        [3][0] = "MP0",
        [10][0] = "UTCL2",
        [13][0] = "OSS",
        [14][0] = "HDP",
        [15][0] = "SDMA0",
        [32+15][0] = "SDMA1",
        [64+15][0] = "SDMA2",
        [96+15][0] = "SDMA3",
        [128+15][0] = "SDMA4",
        [160+11][0] = "JPEG",
        [160+12][0] = "VCN",
        [160+13][0] = "VCNU",
        [160+15][0] = "SDMA5",
        [192+10][0] = "UTCL2",
        [192+11][0] = "JPEG1",
        [192+12][0] = "VCN1",
        [192+13][0] = "VCN1U",
        [192+15][0] = "SDMA6",
        [224+15][0] = "SDMA7",
        [0][1] = "DBGU1",
        [1][1] = "XDP",
        [2][1] = "MP1",
        [3][1] = "MP0",
        [13][1] = "OSS",
        [14][1] = "HDP",
        [15][1] = "SDMA0",
        [32+15][1] = "SDMA1",
        [64+15][1] = "SDMA2",
        [96+15][1] = "SDMA3",
        [128+15][1] = "SDMA4",
        [160+11][1] = "JPEG",
        [160+12][1] = "VCN",
        [160+13][1] = "VCNU",
        [160+15][1] = "SDMA5",
        [192+11][1] = "JPEG1",
        [192+12][1] = "VCN1",
        [192+13][1] = "VCN1U",
        [192+15][1] = "SDMA6",
        [224+15][1] = "SDMA7",
};

static const u32 golden_settings_vega10_hdp[] =
{
        0xf64, 0x0fffffff, 0x00000000,
        0xf65, 0x0fffffff, 0x00000000,
        0xf66, 0x0fffffff, 0x00000000,
        0xf67, 0x0fffffff, 0x00000000,
        0xf68, 0x0fffffff, 0x00000000,
        0xf6a, 0x0fffffff, 0x00000000,
        0xf6b, 0x0fffffff, 0x00000000,
        0xf6c, 0x0fffffff, 0x00000000,
        0xf6d, 0x0fffffff, 0x00000000,
        0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

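/*
 * UMC MCUMC control, mask and status register addresses for each memory
 * channel (see gmc_v9_0_ecc_interrupt_state()).
 */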
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
        (0x000143c0 + 0x00000000),
        (0x000143c0 + 0x00000800),
        (0x000143c0 + 0x00001000),
        (0x000143c0 + 0x00001800),
        (0x000543c0 + 0x00000000),
        (0x000543c0 + 0x00000800),
        (0x000543c0 + 0x00001000),
        (0x000543c0 + 0x00001800),
        (0x000943c0 + 0x00000000),
        (0x000943c0 + 0x00000800),
        (0x000943c0 + 0x00001000),
        (0x000943c0 + 0x00001800),
        (0x000d43c0 + 0x00000000),
        (0x000d43c0 + 0x00000800),
        (0x000d43c0 + 0x00001000),
        (0x000d43c0 + 0x00001800),
        (0x001143c0 + 0x00000000),
        (0x001143c0 + 0x00000800),
        (0x001143c0 + 0x00001000),
        (0x001143c0 + 0x00001800),
        (0x001543c0 + 0x00000000),
        (0x001543c0 + 0x00000800),
        (0x001543c0 + 0x00001000),
        (0x001543c0 + 0x00001800),
        (0x001943c0 + 0x00000000),
        (0x001943c0 + 0x00000800),
        (0x001943c0 + 0x00001000),
        (0x001943c0 + 0x00001800),
        (0x001d43c0 + 0x00000000),
        (0x001d43c0 + 0x00000800),
        (0x001d43c0 + 0x00001000),
        (0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
        (0x000143e0 + 0x00000000),
        (0x000143e0 + 0x00000800),
        (0x000143e0 + 0x00001000),
        (0x000143e0 + 0x00001800),
        (0x000543e0 + 0x00000000),
        (0x000543e0 + 0x00000800),
        (0x000543e0 + 0x00001000),
        (0x000543e0 + 0x00001800),
        (0x000943e0 + 0x00000000),
        (0x000943e0 + 0x00000800),
        (0x000943e0 + 0x00001000),
        (0x000943e0 + 0x00001800),
        (0x000d43e0 + 0x00000000),
        (0x000d43e0 + 0x00000800),
        (0x000d43e0 + 0x00001000),
        (0x000d43e0 + 0x00001800),
        (0x001143e0 + 0x00000000),
        (0x001143e0 + 0x00000800),
        (0x001143e0 + 0x00001000),
        (0x001143e0 + 0x00001800),
        (0x001543e0 + 0x00000000),
        (0x001543e0 + 0x00000800),
        (0x001543e0 + 0x00001000),
        (0x001543e0 + 0x00001800),
        (0x001943e0 + 0x00000000),
        (0x001943e0 + 0x00000800),
        (0x001943e0 + 0x00001000),
        (0x001943e0 + 0x00001800),
        (0x001d43e0 + 0x00000000),
        (0x001d43e0 + 0x00000800),
        (0x001d43e0 + 0x00001000),
        (0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
        (0x000143c2 + 0x00000000),
        (0x000143c2 + 0x00000800),
        (0x000143c2 + 0x00001000),
        (0x000143c2 + 0x00001800),
        (0x000543c2 + 0x00000000),
        (0x000543c2 + 0x00000800),
        (0x000543c2 + 0x00001000),
        (0x000543c2 + 0x00001800),
        (0x000943c2 + 0x00000000),
        (0x000943c2 + 0x00000800),
        (0x000943c2 + 0x00001000),
        (0x000943c2 + 0x00001800),
        (0x000d43c2 + 0x00000000),
        (0x000d43c2 + 0x00000800),
        (0x000d43c2 + 0x00001000),
        (0x000d43c2 + 0x00001800),
        (0x001143c2 + 0x00000000),
        (0x001143c2 + 0x00000800),
        (0x001143c2 + 0x00001000),
        (0x001143c2 + 0x00001800),
        (0x001543c2 + 0x00000000),
        (0x001543c2 + 0x00000800),
        (0x001543c2 + 0x00001000),
        (0x001543c2 + 0x00001800),
        (0x001943c2 + 0x00000000),
        (0x001943c2 + 0x00000800),
        (0x001943c2 + 0x00001000),
        (0x001943c2 + 0x00001800),
        (0x001d43c2 + 0x00000000),
        (0x001d43c2 + 0x00000800),
        (0x001d43c2 + 0x00001000),
        (0x001d43c2 + 0x00001800),
};

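/*
 * Enable or disable the UMC ECC error interrupt by setting or clearing the
 * low control bits in every MCUMC control and mask register.
 */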
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *src,
                unsigned type,
                enum amdgpu_interrupt_state state)
{
        u32 bits, i, tmp, reg;

        /* Devices newer than VEGA10/12 shall have these programming
         * sequences performed by PSP BL */
        if (adev->asic_type >= CHIP_VEGA20)
                return 0;

        bits = 0x7f;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                break;
        default:
                break;
        }

        return 0;
}

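/*
 * Enable or disable VM protection fault interrupts in the VM_CONTEXT*_CNTL
 * registers of all 16 contexts on every VM hub.
 */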
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i, j;

        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp &= ~bits;
                                WREG32(reg, tmp);
                        }
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp |= bits;
                                WREG32(reg, tmp);
                        }
                }
        default:
                break;
        }

        return 0;
}

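/*
 * VM fault interrupt handler: decode the faulting address, filter or handle
 * retry faults, and print the fault source and the decoded
 * VM_L2_PROTECTION_FAULT_STATUS fields.
 */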
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        uint32_t status = 0, cid = 0, rw = 0;
        struct amdgpu_task_info task_info;
        struct amdgpu_vmhub *hub;
        const char *mmhub_cid;
        const char *hub_name;
        u64 addr;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
                                                    entry->timestamp))
                return 1; /* This also prevents sending it to KFD */

        /* If it's the first fault for this address, process it normally */
        if (retry_fault && !in_interrupt() &&
            amdgpu_vm_handle_fault(adev, entry->pasid, addr))
                return 1; /* This also prevents sending it to KFD */

        if (!printk_ratelimit())
                return 0;

        if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
                hub_name = "mmhub0";
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
        } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
                hub_name = "mmhub1";
                hub = &adev->vmhub[AMDGPU_MMHUB_1];
        } else {
                hub_name = "gfxhub0";
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        }

        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

        dev_err(adev->dev,
                "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
                "pasid:%u, for process %s pid %d thread %s pid %d)\n",
                hub_name, retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
        dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
                addr, entry->client_id);

        if (amdgpu_sriov_vf(adev))
                return 0;

        /*
         * Issue a dummy read to wait for the status register to
         * be updated to avoid reading an incorrect value due to
         * the new fast GRBM interface.
         */
        if (entry->vmid_src == AMDGPU_GFXHUB_0)
                RREG32(hub->vm_l2_pro_fault_status);

        status = RREG32(hub->vm_l2_pro_fault_status);
        cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
        rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
        WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);


        dev_err(adev->dev,
                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                status);
        if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
                        gfxhub_client_ids[cid],
                        cid);
        } else {
                switch (adev->asic_type) {
                case CHIP_VEGA10:
                        mmhub_cid = mmhub_client_ids_vega10[cid][rw];
                        break;
                case CHIP_VEGA12:
                        mmhub_cid = mmhub_client_ids_vega12[cid][rw];
                        break;
                case CHIP_VEGA20:
                        mmhub_cid = mmhub_client_ids_vega20[cid][rw];
                        break;
                case CHIP_ARCTURUS:
                        mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
                        break;
                case CHIP_RAVEN:
                        mmhub_cid = mmhub_client_ids_raven[cid][rw];
                        break;
                case CHIP_RENOIR:
                        mmhub_cid = mmhub_client_ids_renoir[cid][rw];
                        break;
                default:
                        mmhub_cid = NULL;
                        break;
                }
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        mmhub_cid ? mmhub_cid : "unknown", cid);
        }
        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
        dev_err(adev->dev, "\t RW: 0x%x\n", rw);
        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,
};


static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
}

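/*
 * Build the VM_INVALIDATE_ENG*_REQ value used to flush the TLB entries of a
 * vmid with the given flush type.
 */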
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
                                        uint32_t flush_type)
{
        u32 req = 0;

        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                       uint32_t vmhub)
{
        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)) &&
                (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
                   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

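/* Look up the pasid mapped to a vmid in the ATC; returns true if the mapping is valid */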
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
                     + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned eng = 17;
        u32 j, inv_req, inv_req2, tmp;
        struct amdgpu_vmhub *hub;

        BUG_ON(vmhub >= adev->num_vmhubs);

        hub = &adev->vmhub[vmhub];
        if (adev->gmc.xgmi.num_physical_nodes &&
            adev->asic_type == CHIP_VEGA20) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
                inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
        } else {
                inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
                inv_req2 = 0;
        }

        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
        if (adev->gfx.kiq.ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_sem)) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
                up_read(&adev->reset_sem);
                return;
        }

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * The GPUVM invalidate acknowledge state may be lost across a
         * power-gating off cycle. Acquire a semaphore before the
         * invalidation and release it afterwards to avoid entering a
         * power-gated state, as a workaround for the issue.
         */

        /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
        if (use_semaphore) {
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquire */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                                            hub->eng_distance * eng);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (j >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        do {
                WREG32_NO_KIQ(hub->vm_inv_eng0_req +
                              hub->eng_distance * eng, inv_req);

                /*
                 * Issue a dummy read to wait for the ACK register to
                 * be cleared to avoid a false ACK due to the new fast
                 * GRBM interface.
                 */
                if (vmhub == AMDGPU_GFXHUB_0)
                        RREG32_NO_KIQ(hub->vm_inv_eng0_req +
                                      hub->eng_distance * eng);

                for (j = 0; j < adev->usec_timeout; j++) {
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng);
                        if (tmp & (1 << vmid))
                                break;
                        udelay(1);
                }

                inv_req = inv_req2;
                inv_req2 = 0;
        } while (inv_req);

        /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
        if (use_semaphore)
                /*
                 * Release the semaphore after the invalidation;
                 * writing 0 releases the semaphore.
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                              hub->eng_distance * eng, 0);

        spin_unlock(&adev->gmc.invalidate_lock);

        if (j < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                        uint16_t pasid, uint32_t flush_type,
                                        bool all_hub)
{
        int vmid, i;
        signed long r;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        if (amdgpu_in_reset(adev))
                return -EIO;

        if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
                                       adev->asic_type == CHIP_VEGA20);
                /* 2 dwords flush + 8 dwords fence */
                unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

                if (vega20_xgmi_wa)
                        ndw += kiq->pmf->invalidate_tlbs_size;

                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, ndw);
                if (vega20_xgmi_wa)
                        kiq->pmf->kiq_invalidate_tlbs(ring,
                                                      pasid, 2, all_hub);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        up_read(&adev->reset_sem);
                        return -ETIME;
                }

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        up_read(&adev->reset_sem);
                        return -ETIME;
                }
                up_read(&adev->reset_sem);
                return 0;
        }

        for (vmid = 1; vmid < 16; vmid++) {

                ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
                                &queried_pasid);
                if (ret && queried_pasid == pasid) {
                        if (all_hub) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                        i, flush_type);
                        } else {
                                gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                AMDGPU_GFXHUB_0, flush_type);
                        }
                        break;
                }
        }

        return 0;

}

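/*
 * Emit a TLB flush for a vmid on the given ring: write the page directory
 * base address, then request the invalidation and wait for its ack, guarded
 * by the invalidation semaphore where required.
 */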
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * The GPUVM invalidate acknowledge state may be lost across a
         * power-gating off cycle. Acquire a semaphore before the
         * invalidation and release it afterwards to avoid entering a
         * power-gated state, as a workaround for the issue.
         */

        /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquire */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
        if (use_semaphore)
                /*
                 * Release the semaphore after the invalidation;
                 * writing 0 releases the semaphore.
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}

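/* Emit a write of the pasid into the IH VMID LUT entry for this vmid (MMHUB1 has no LUT register) */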
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                        unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* Do nothing because there's no lut register for mmhub1. */
        if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
                return;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

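/* Translate AMDGPU_VM_MTYPE_* mapping flags into VEGA10 PTE MTYPE bits, defaulting to NC */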
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_RW:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        }
}

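/*
 * Adjust a PDE for the VRAM base offset and, when translate_further is
 * enabled, set the block fragment size or translate-further bits.
 */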
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

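/* Compute PTE flags for a mapping: executable bit, MTYPE, PRT, and XGMI snooping on Arcturus */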
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
        *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (adev->asic_type == CHIP_ARCTURUS &&
            !(*flags & AMDGPU_PTE_SYSTEM) &&
            mapping->bo_va->is_xgmi)
                *flags |= AMDGPU_PTE_SNOOPED;
}

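/*
 * Estimate the size of the framebuffer set up by the VBIOS: the fixed VGA
 * allocation when VGA mode is enabled, otherwise the active viewport size
 * at 4 bytes per pixel.
 */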
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        unsigned size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport;

                switch (adev->asic_type) {
                case CHIP_RAVEN:
                case CHIP_RENOIR:
                        viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                        size = (REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                                4);
                        break;
                case CHIP_VEGA10:
                case CHIP_VEGA12:
                case CHIP_VEGA20:
                default:
                        viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
                        size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                                4);
                        break;
                }
        }

        return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
        .get_vm_pde = gmc_v9_0_get_vm_pde,
        .get_vm_pte = gmc_v9_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->umc.funcs = &umc_v6_0_funcs;
                break;
        case CHIP_VEGA20:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        default:
                break;
        }
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_ARCTURUS:
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
                break;
        default:
                adev->mmhub.funcs = &mmhub_v1_0_funcs;
                break;
        }
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
        gmc_v9_0_set_gfxhub_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        amdgpu_bo_late_init(adev);

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;

        /*
         * Work around a performance drop issue on vega10 where the VBIOS
         * enables partial writes while HBM ECC is disabled.
         */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
                if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                }
        }

        if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
                adev->mmhub.funcs->reset_ras_error_count(adev);

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

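/*
 * Place the VRAM, GART and AGP apertures in the GPU address space, taking
 * the XGMI node offset into account, and record the VRAM base offset.
 */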
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        if (!amdgpu_sriov_vf(adev))
                base = adev->mmhub.funcs->get_fb_location(adev);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);
        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* the memory size is reported in MB, convert it to bytes */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
                adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif
        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_VEGA10:  /* all engines support GPUVM */
                case CHIP_VEGA12:  /* all engines support GPUVM */
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                case CHIP_RAVEN:   /* DCE SG support */
                case CHIP_RENOIR:
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

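/* Allocate the GART table in VRAM: one 64-bit PTE per GPU page, mapped uncached and executable */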
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                 AMDGPU_PTE_EXECUTABLE;
        return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v9_0_save_registers - save registers
 *
 * @adev: amdgpu_device pointer
 *
 * This saves registers whose values may need to be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_RAVEN)
                adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

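/*
 * Software init: pick VM sizes and hub counts per ASIC, register the page
 * fault and ECC interrupt sources, and initialize the MC, GART and VM manager.
 */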
static int gmc_v9_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->gfxhub.funcs->init(adev);

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        r = amdgpu_atomfirmware_get_vram_info(adev,
                &vram_width, &vram_type, &vram_vendor);
        if (amdgpu_sriov_vf(adev))
                /* For Vega10 SR-IOV, vram_width can't be read from ATOM (as on
                 * RAVEN), and the DF-related registers are not readable either;
                 * hardcoding seems to be the only way to set the correct
                 * vram_width
                 */
1355                 adev->gmc.vram_width = 2048;
1356         else if (amdgpu_emu_mode != 1)
1357                 adev->gmc.vram_width = vram_width;
1358
1359         if (!adev->gmc.vram_width) {
1360                 int chansize, numchan;
1361
1362                 /* hbm memory channel size */
1363                 if (adev->flags & AMD_IS_APU)
1364                         chansize = 64;
1365                 else
1366                         chansize = 128;
1367
1368                 numchan = adev->df.funcs->get_hbm_channel_number(adev);
1369                 adev->gmc.vram_width = numchan * chansize;
1370         }
1371
1372         adev->gmc.vram_type = vram_type;
1373         adev->gmc.vram_vendor = vram_vendor;
1374         switch (adev->asic_type) {
1375         case CHIP_RAVEN:
1376                 adev->num_vmhubs = 2;
1377
1378                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1379                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1380                 } else {
1381                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
1382                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1383                         adev->gmc.translate_further =
1384                                 adev->vm_manager.num_level > 1;
1385                 }
1386                 break;
1387         case CHIP_VEGA10:
1388         case CHIP_VEGA12:
1389         case CHIP_VEGA20:
1390         case CHIP_RENOIR:
1391                 adev->num_vmhubs = 2;
1392
1393
1394                 /*
1395                  * To fulfill 4-level page support,
1396                  * vm size is 256TB (48bit), maximum size of Vega10,
1397                  * block size 512 (9bit)
1398                  */
1399                 /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
1400                 if (amdgpu_sriov_vf(adev))
1401                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1402                 else
1403                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1404                 break;
1405         case CHIP_ARCTURUS:
1406                 adev->num_vmhubs = 3;
1407
1408                 /* Keep the vm size same with Vega20 */
1409                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1410                 break;
1411         default:
1412                 break;
1413         }
1414
1415         /* This interrupt is VMC page fault.*/
1416         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1417                                 &adev->gmc.vm_fault);
1418         if (r)
1419                 return r;
1420
1421         if (adev->asic_type == CHIP_ARCTURUS) {
1422                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1423                                         &adev->gmc.vm_fault);
1424                 if (r)
1425                         return r;
1426         }
1427
1428         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1429                                 &adev->gmc.vm_fault);
1430
1431         if (r)
1432                 return r;
1433
1434         if (!amdgpu_sriov_vf(adev)) {
1435                 /* interrupt sent to DF. */
1436                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1437                                       &adev->gmc.ecc_irq);
1438                 if (r)
1439                         return r;
1440         }
1441
1442         /* Set the internal MC address mask
1443          * This is the max address of the GPU's
1444          * internal address space.
1445          */
1446         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1447
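        /* The GMC is assumed to address at most 44 bits of system memory, so
         * cap the DMA mask at 44 bits and let DRM decide whether swiotlb
         * bouncing is needed for that width.
         */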
1448         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1449         if (r) {
1450                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1451                 return r;
1452         }
1453         adev->need_swiotlb = drm_need_swiotlb(44);
1454
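        /* On XGMI-capable parts, query the XGMI node configuration from the
         * gfxhub before sizing the memory controller.
         */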
1455         if (adev->gmc.xgmi.supported) {
1456                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
1457                 if (r)
1458                         return r;
1459         }
1460
1461         r = gmc_v9_0_mc_init(adev);
1462         if (r)
1463                 return r;
1464
1465         amdgpu_gmc_get_vbios_allocations(adev);
1466
1467         /* Memory manager */
1468         r = amdgpu_bo_init(adev);
1469         if (r)
1470                 return r;
1471
1472         r = gmc_v9_0_gart_init(adev);
1473         if (r)
1474                 return r;
1475
1476         /*
1477          * number of VMs
1478          * VMID 0 is reserved for System
1479          * amdgpu graphics/compute will use VMIDs 1..n-1
1480          * amdkfd will use VMIDs n..15
1481          *
1482          * The first KFD VMID is 8 for GPUs with graphics, 3 for
1483          * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
1484          * for video processing.
1485          */
1486         adev->vm_manager.first_kfd_vmid =
1487                 adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
1488
1489         amdgpu_vm_manager_init(adev);
1490
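        /* Save registers that gmc_v9_0_restore_registers() must put back later. */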
1491         gmc_v9_0_save_registers(adev);
1492
1493         return 0;
1494 }
1495
1496 static int gmc_v9_0_sw_fini(void *handle)
1497 {
1498         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1499
1500         amdgpu_gmc_ras_fini(adev);
1501         amdgpu_gem_force_release(adev);
1502         amdgpu_vm_manager_fini(adev);
1503         amdgpu_gart_table_vram_free(adev);
1504         amdgpu_bo_fini(adev);
1505         amdgpu_gart_fini(adev);
1506
1507         return 0;
1508 }
1509
1510 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1511 {
1513         switch (adev->asic_type) {
1514         case CHIP_VEGA10:
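                /* Golden settings are presumably programmed by the host under
                 * SR-IOV, so VFs skip them.
                 */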
1515                 if (amdgpu_sriov_vf(adev))
1516                         break;
1517                 fallthrough;
1518         case CHIP_VEGA20:
1519                 soc15_program_register_sequence(adev,
1520                                                 golden_settings_mmhub_1_0_0,
1521                                                 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1522                 soc15_program_register_sequence(adev,
1523                                                 golden_settings_athub_1_0_0,
1524                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1525                 break;
1526         case CHIP_VEGA12:
1527                 break;
1528         case CHIP_RAVEN:
1529                 /* TODO for renoir */
1530                 soc15_program_register_sequence(adev,
1531                                                 golden_settings_athub_1_0_0,
1532                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1533                 break;
1534         default:
1535                 break;
1536         }
1537 }
1538
1539 /**
1540  * gmc_v9_0_restore_registers - restores regs
1541  *
1542  * @adev: amdgpu_device pointer
1543  *
1544  * This restores register values previously saved by gmc_v9_0_save_registers().
1545  */
1546 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1547 {
1548         if (adev->asic_type == CHIP_RAVEN) {
1549                 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
1550                 WARN_ON(adev->gmc.sdpif_register !=
1551                         RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
1552         }
1553 }
1554
1555 /**
1556  * gmc_v9_0_gart_enable - gart enable
1557  *
1558  * @adev: amdgpu_device pointer
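 *
 * Pins the GART table in VRAM and enables GART translation on both the
 * GFX and MM hubs.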
1559  */
1560 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1561 {
1562         int r;
1563
1564         if (adev->gart.bo == NULL) {
1565                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1566                 return -EINVAL;
1567         }
1568         r = amdgpu_gart_table_vram_pin(adev);
1569         if (r)
1570                 return r;
1571
1572         r = adev->gfxhub.funcs->gart_enable(adev);
1573         if (r)
1574                 return r;
1575
1576         r = adev->mmhub.funcs->gart_enable(adev);
1577         if (r)
1578                 return r;
1579
1580         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1581                  (unsigned)(adev->gmc.gart_size >> 20),
1582                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1583         adev->gart.ready = true;
1584         return 0;
1585 }
1586
1587 static int gmc_v9_0_hw_init(void *handle)
1588 {
1589         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1590         bool value;
1591         int r, i;
1592         u32 tmp;
1593
1594         /* Program golden registers first; the initialization order below matters. */
1595         gmc_v9_0_init_golden_registers(adev);
1596
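        /* With a display block present, lock out and disable the legacy VGA
         * path; Arcturus has no display, so it is skipped there.
         */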
1597         if (adev->mode_info.num_crtc) {
1598                 if (adev->asic_type != CHIP_ARCTURUS) {
1599                         /* Lockout access through VGA aperture*/
1600                         WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1601
1602                         /* disable VGA render */
1603                         WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1604                 }
1605         }
1606
1607         amdgpu_device_program_register_sequence(adev,
1608                                                 golden_settings_vega10_hdp,
1609                                                 ARRAY_SIZE(golden_settings_vega10_hdp));
1610
1611         if (adev->mmhub.funcs->update_power_gating)
1612                 adev->mmhub.funcs->update_power_gating(adev, true);
1613
1614         switch (adev->asic_type) {
1615         case CHIP_ARCTURUS:
1616                 WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
1617                 break;
1618         default:
1619                 break;
1620         }
1621
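        /* Judging by the field name, let HDP flushes also invalidate the HDP cache. */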
1622         WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1623
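        /* Read HDP_HOST_PATH_CNTL and write it back unchanged; apparently
         * only the write access itself is needed here.
         */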
1624         tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1625         WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1626
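        /* Point the HDP non-surface aperture at the start of VRAM; the
         * address is programmed in 256-byte units, split into low and high
         * halves.
         */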
1627         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1628         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1629
1630         /* After HDP is initialized, flush HDP.*/
1631         adev->nbio.funcs->hdp_flush(adev, NULL);
1632
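        /* "value" is the default fault handling programmed into both hubs
         * below; it is cleared only when the vm_fault_stop parameter requests
         * stopping on every fault.
         */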
1633         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1634                 value = false;
1635         else
1636                 value = true;
1637
1638         if (!amdgpu_sriov_vf(adev)) {
1639                 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
1640                 adev->mmhub.funcs->set_fault_enable_default(adev, value);
1641         }
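        /* Flush VMID 0's TLB on every VM hub. */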
1642         for (i = 0; i < adev->num_vmhubs; ++i)
1643                 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1644
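        /* Program UMC registers if this ASIC's UMC implementation provides an
         * init hook.
         */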
1645         if (adev->umc.funcs && adev->umc.funcs->init_registers)
1646                 adev->umc.funcs->init_registers(adev);
1647
1648         r = gmc_v9_0_gart_enable(adev);
1649
1650         return r;
1651 }
1652
1653 /**
1654  * gmc_v9_0_gart_disable - gart disable
1655  *
1656  * @adev: amdgpu_device pointer
1657  *
1658  * This disables all VM page tables.
1659  */
1660 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1661 {
1662         adev->gfxhub.funcs->gart_disable(adev);
1663         adev->mmhub.funcs->gart_disable(adev);
1664         amdgpu_gart_table_vram_unpin(adev);
1665 }
1666
1667 static int gmc_v9_0_hw_fini(void *handle)
1668 {
1669         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1670
1671         if (amdgpu_sriov_vf(adev)) {
1672                 /* full access mode, so don't touch any GMC register */
1673                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1674                 return 0;
1675         }
1676
1677         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1678         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1679         gmc_v9_0_gart_disable(adev);
1680
1681         return 0;
1682 }
1683
1684 static int gmc_v9_0_suspend(void *handle)
1685 {
1686         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1687
1688         return gmc_v9_0_hw_fini(adev);
1689 }
1690
1691 static int gmc_v9_0_resume(void *handle)
1692 {
1693         int r;
1694         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1695
1696         r = gmc_v9_0_hw_init(adev);
1697         if (r)
1698                 return r;
1699
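        /* The hardware VM contexts were reinitialized, so drop any cached
         * VMID assignments.
         */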
1700         amdgpu_vmid_reset_all(adev);
1701
1702         return 0;
1703 }
1704
1705 static bool gmc_v9_0_is_idle(void *handle)
1706 {
1707         /* MC is always ready in GMC v9.*/
1708         return true;
1709 }
1710
1711 static int gmc_v9_0_wait_for_idle(void *handle)
1712 {
1713         /* There is no need to wait for MC idle in GMC v9.*/
1714         return 0;
1715 }
1716
1717 static int gmc_v9_0_soft_reset(void *handle)
1718 {
1719         /* XXX for emulation.*/
1720         return 0;
1721 }
1722
1723 static int gmc_v9_0_set_clockgating_state(void *handle,
1724                                         enum amd_clockgating_state state)
1725 {
1726         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1727
1728         adev->mmhub.funcs->set_clockgating(adev, state);
1729
1730         athub_v1_0_set_clockgating(adev, state);
1731
1732         return 0;
1733 }
1734
1735 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1736 {
1737         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1738
1739         adev->mmhub.funcs->get_clockgating(adev, flags);
1740
1741         athub_v1_0_get_clockgating(adev, flags);
1742 }
1743
1744 static int gmc_v9_0_set_powergating_state(void *handle,
1745                                         enum amd_powergating_state state)
1746 {
1747         return 0;
1748 }
1749
1750 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1751         .name = "gmc_v9_0",
1752         .early_init = gmc_v9_0_early_init,
1753         .late_init = gmc_v9_0_late_init,
1754         .sw_init = gmc_v9_0_sw_init,
1755         .sw_fini = gmc_v9_0_sw_fini,
1756         .hw_init = gmc_v9_0_hw_init,
1757         .hw_fini = gmc_v9_0_hw_fini,
1758         .suspend = gmc_v9_0_suspend,
1759         .resume = gmc_v9_0_resume,
1760         .is_idle = gmc_v9_0_is_idle,
1761         .wait_for_idle = gmc_v9_0_wait_for_idle,
1762         .soft_reset = gmc_v9_0_soft_reset,
1763         .set_clockgating_state = gmc_v9_0_set_clockgating_state,
1764         .set_powergating_state = gmc_v9_0_set_powergating_state,
1765         .get_clockgating_state = gmc_v9_0_get_clockgating_state,
1766 };
1767
1768 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1769 {
1770         .type = AMD_IP_BLOCK_TYPE_GMC,
1771         .major = 9,
1772         .minor = 0,
1773         .rev = 0,
1774         .funcs = &gmc_v9_0_ip_funcs,
1775 };