drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                            0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                   2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT   0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT  0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK     0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK    0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                    0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                           2


static const char *gfxhub_client_ids[] = {
        "CB",
        "DB",
        "IA",
        "WD",
        "CPF",
        "CPC",
        "CPG",
        "RLC",
        "TCP",
        "SQC (inst)",
        "SQC (data)",
        "SQG",
        "PA",
};

static const char *mmhub_client_ids_raven[][2] = {
        [0][0] = "MP1",
        [1][0] = "MP0",
        [2][0] = "VCN",
        [3][0] = "VCNU",
        [4][0] = "HDP",
        [5][0] = "DCE",
        [13][0] = "UTCL2",
        [19][0] = "TLS",
        [26][0] = "OSS",
        [27][0] = "SDMA0",
        [0][1] = "MP1",
        [1][1] = "MP0",
        [2][1] = "VCN",
        [3][1] = "VCNU",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [6][1] = "DBGU0",
        [7][1] = "DCE",
        [8][1] = "DCEDWB0",
        [9][1] = "DCEDWB1",
        [26][1] = "OSS",
        [27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
        [0][0] = "MP1",
        [1][0] = "MP0",
        [2][0] = "HDP",
        [4][0] = "DCEDMC",
        [5][0] = "DCEVGA",
        [13][0] = "UTCL2",
        [19][0] = "TLS",
        [26][0] = "OSS",
        [27][0] = "SDMA0",
        [28][0] = "VCN",
        [29][0] = "VCNU",
        [30][0] = "JPEG",
        [0][1] = "MP1",
        [1][1] = "MP0",
        [2][1] = "HDP",
        [3][1] = "XDP",
        [6][1] = "DBGU0",
        [7][1] = "DCEDMC",
        [8][1] = "DCEVGA",
        [9][1] = "DCEDWB",
        [26][1] = "OSS",
        [27][1] = "SDMA0",
        [28][1] = "VCN",
        [29][1] = "VCNU",
        [30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
        [0][0] = "MP0",
        [1][0] = "UVD",
        [2][0] = "UVDU",
        [3][0] = "HDP",
        [13][0] = "UTCL2",
        [14][0] = "OSS",
        [15][0] = "SDMA1",
        [32+0][0] = "VCE0",
        [32+1][0] = "VCE0U",
        [32+2][0] = "XDMA",
        [32+3][0] = "DCE",
        [32+4][0] = "MP1",
        [32+14][0] = "SDMA0",
        [0][1] = "MP0",
        [1][1] = "UVD",
        [2][1] = "UVDU",
        [3][1] = "DBGU0",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [14][1] = "OSS",
        [15][1] = "SDMA0",
        [32+0][1] = "VCE0",
        [32+1][1] = "VCE0U",
        [32+2][1] = "XDMA",
        [32+3][1] = "DCE",
        [32+4][1] = "DCEDWB",
        [32+5][1] = "MP1",
        [32+6][1] = "DBGU1",
        [32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
        [0][0] = "MP0",
        [1][0] = "VCE0",
        [2][0] = "VCE0U",
        [3][0] = "HDP",
        [13][0] = "UTCL2",
        [14][0] = "OSS",
        [15][0] = "SDMA1",
        [32+0][0] = "DCE",
        [32+1][0] = "XDMA",
        [32+2][0] = "UVD",
        [32+3][0] = "UVDU",
        [32+4][0] = "MP1",
        [32+15][0] = "SDMA0",
        [0][1] = "MP0",
        [1][1] = "VCE0",
        [2][1] = "VCE0U",
        [3][1] = "DBGU0",
        [4][1] = "HDP",
        [5][1] = "XDP",
        [14][1] = "OSS",
        [15][1] = "SDMA0",
        [32+0][1] = "DCE",
        [32+1][1] = "DCEDWB",
        [32+2][1] = "XDMA",
        [32+3][1] = "UVD",
        [32+4][1] = "UVDU",
        [32+5][1] = "MP1",
        [32+6][1] = "DBGU1",
        [32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
        [0][0] = "XDMA",
        [1][0] = "DCE",
        [2][0] = "VCE0",
        [3][0] = "VCE0U",
        [4][0] = "UVD",
        [5][0] = "UVD1U",
        [13][0] = "OSS",
        [14][0] = "HDP",
        [15][0] = "SDMA0",
        [32+0][0] = "UVD",
        [32+1][0] = "UVDU",
        [32+2][0] = "MP1",
        [32+3][0] = "MP0",
        [32+12][0] = "UTCL2",
        [32+14][0] = "SDMA1",
        [0][1] = "XDMA",
        [1][1] = "DCE",
        [2][1] = "DCEDWB",
        [3][1] = "VCE0",
        [4][1] = "VCE0U",
        [5][1] = "UVD1",
        [6][1] = "UVD1U",
        [7][1] = "DBGU0",
        [8][1] = "XDP",
        [13][1] = "OSS",
        [14][1] = "HDP",
        [15][1] = "SDMA0",
        [32+0][1] = "UVD",
        [32+1][1] = "UVDU",
        [32+2][1] = "DBGU1",
        [32+3][1] = "MP1",
        [32+4][1] = "MP0",
        [32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
        [2][0] = "MP1",
        [3][0] = "MP0",
        [10][0] = "UTCL2",
        [13][0] = "OSS",
        [14][0] = "HDP",
        [15][0] = "SDMA0",
        [32+15][0] = "SDMA1",
        [64+15][0] = "SDMA2",
        [96+15][0] = "SDMA3",
        [128+15][0] = "SDMA4",
        [160+11][0] = "JPEG",
        [160+12][0] = "VCN",
        [160+13][0] = "VCNU",
        [160+15][0] = "SDMA5",
        [192+10][0] = "UTCL2",
        [192+11][0] = "JPEG1",
        [192+12][0] = "VCN1",
        [192+13][0] = "VCN1U",
        [192+15][0] = "SDMA6",
        [224+15][0] = "SDMA7",
        [0][1] = "DBGU1",
        [1][1] = "XDP",
        [2][1] = "MP1",
        [3][1] = "MP0",
        [13][1] = "OSS",
        [14][1] = "HDP",
        [15][1] = "SDMA0",
        [32+15][1] = "SDMA1",
        [64+15][1] = "SDMA2",
        [96+15][1] = "SDMA3",
        [128+15][1] = "SDMA4",
        [160+11][1] = "JPEG",
        [160+12][1] = "VCN",
        [160+13][1] = "VCNU",
        [160+15][1] = "SDMA5",
        [192+11][1] = "JPEG1",
        [192+12][1] = "VCN1",
        [192+13][1] = "VCN1U",
        [192+15][1] = "SDMA6",
        [224+15][1] = "SDMA7",
};

static const u32 golden_settings_vega10_hdp[] =
{
        0xf64, 0x0fffffff, 0x00000000,
        0xf65, 0x0fffffff, 0x00000000,
        0xf66, 0x0fffffff, 0x00000000,
        0xf67, 0x0fffffff, 0x00000000,
        0xf68, 0x0fffffff, 0x00000000,
        0xf6a, 0x0fffffff, 0x00000000,
        0xf6b, 0x0fffffff, 0x00000000,
        0xf6c, 0x0fffffff, 0x00000000,
        0xf6d, 0x0fffffff, 0x00000000,
        0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
        (0x000143c0 + 0x00000000),
        (0x000143c0 + 0x00000800),
        (0x000143c0 + 0x00001000),
        (0x000143c0 + 0x00001800),
        (0x000543c0 + 0x00000000),
        (0x000543c0 + 0x00000800),
        (0x000543c0 + 0x00001000),
        (0x000543c0 + 0x00001800),
        (0x000943c0 + 0x00000000),
        (0x000943c0 + 0x00000800),
        (0x000943c0 + 0x00001000),
        (0x000943c0 + 0x00001800),
        (0x000d43c0 + 0x00000000),
        (0x000d43c0 + 0x00000800),
        (0x000d43c0 + 0x00001000),
        (0x000d43c0 + 0x00001800),
        (0x001143c0 + 0x00000000),
        (0x001143c0 + 0x00000800),
        (0x001143c0 + 0x00001000),
        (0x001143c0 + 0x00001800),
        (0x001543c0 + 0x00000000),
        (0x001543c0 + 0x00000800),
        (0x001543c0 + 0x00001000),
        (0x001543c0 + 0x00001800),
        (0x001943c0 + 0x00000000),
        (0x001943c0 + 0x00000800),
        (0x001943c0 + 0x00001000),
        (0x001943c0 + 0x00001800),
        (0x001d43c0 + 0x00000000),
        (0x001d43c0 + 0x00000800),
        (0x001d43c0 + 0x00001000),
        (0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
        (0x000143e0 + 0x00000000),
        (0x000143e0 + 0x00000800),
        (0x000143e0 + 0x00001000),
        (0x000143e0 + 0x00001800),
        (0x000543e0 + 0x00000000),
        (0x000543e0 + 0x00000800),
        (0x000543e0 + 0x00001000),
        (0x000543e0 + 0x00001800),
        (0x000943e0 + 0x00000000),
        (0x000943e0 + 0x00000800),
        (0x000943e0 + 0x00001000),
        (0x000943e0 + 0x00001800),
        (0x000d43e0 + 0x00000000),
        (0x000d43e0 + 0x00000800),
        (0x000d43e0 + 0x00001000),
        (0x000d43e0 + 0x00001800),
        (0x001143e0 + 0x00000000),
        (0x001143e0 + 0x00000800),
        (0x001143e0 + 0x00001000),
        (0x001143e0 + 0x00001800),
        (0x001543e0 + 0x00000000),
        (0x001543e0 + 0x00000800),
        (0x001543e0 + 0x00001000),
        (0x001543e0 + 0x00001800),
        (0x001943e0 + 0x00000000),
        (0x001943e0 + 0x00000800),
        (0x001943e0 + 0x00001000),
        (0x001943e0 + 0x00001800),
        (0x001d43e0 + 0x00000000),
        (0x001d43e0 + 0x00000800),
        (0x001d43e0 + 0x00001000),
        (0x001d43e0 + 0x00001800),
};

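/**
 * gmc_v9_0_ecc_interrupt_state - enable/disable UMC ECC interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Sets or clears the ECC interrupt enable bits in the UMC MCUMC
 * control and mask registers listed above.
 */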
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *src,
                unsigned type,
                enum amdgpu_interrupt_state state)
{
        u32 bits, i, tmp, reg;

        /* Devices newer than VEGA10/12 shall have these programming
         * sequences performed by the PSP BL */
        if (adev->asic_type >= CHIP_VEGA20)
                return 0;

        bits = 0x7f;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                break;
        default:
                break;
        }

        return 0;
}

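/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Toggles the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of every context on every VM hub.
 */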
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i, j;

        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp &= ~bits;
                                WREG32(reg, tmp);
                        }
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp |= bits;
                                WREG32(reg, tmp);
                        }
                }
                break;
        default:
                break;
        }

        return 0;
}

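/**
 * gmc_v9_0_process_interrupt - process a VM page fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this fault belongs to
 * @entry: decoded interrupt vector entry
 *
 * Filters and, where possible, recovers retry page faults, then
 * decodes the fault status registers and logs the details.
 */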
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        uint32_t status = 0, cid = 0, rw = 0;
        struct amdgpu_task_info task_info;
        struct amdgpu_vmhub *hub;
        const char *mmhub_cid;
        const char *hub_name;
        u64 addr;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (retry_fault) {
                /* Returning 1 here also prevents sending the IV to the KFD */

                /* Process it only if it's the first fault for this address */
                if (entry->ih != &adev->irq.ih_soft &&
                    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
                                             entry->timestamp))
                        return 1;

                /* Delegate it to a different ring if the hardware hasn't
                 * already done it.
                 */
                if (in_interrupt()) {
                        amdgpu_irq_delegate(adev, entry, 8);
                        return 1;
                }

                /* Try to handle the recoverable page faults by filling page
                 * tables
                 */
                if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
                        return 1;
        }

        if (!printk_ratelimit())
                return 0;

        if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
                hub_name = "mmhub0";
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
        } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
                hub_name = "mmhub1";
                hub = &adev->vmhub[AMDGPU_MMHUB_1];
        } else {
                hub_name = "gfxhub0";
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        }

        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

        dev_err(adev->dev,
                "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
                "pasid:%u, for process %s pid %d thread %s pid %d)\n",
                hub_name, retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
        dev_err(adev->dev, "  in page starting at address 0x%012llx from client %d\n",
                addr, entry->client_id);

        if (amdgpu_sriov_vf(adev))
                return 0;

        /*
         * Issue a dummy read to wait for the status register to
         * be updated to avoid reading an incorrect value due to
         * the new fast GRBM interface.
         */
        if (entry->vmid_src == AMDGPU_GFXHUB_0)
                RREG32(hub->vm_l2_pro_fault_status);

        status = RREG32(hub->vm_l2_pro_fault_status);
        cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
        rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
        WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);


        dev_err(adev->dev,
                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                status);
        if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
                        gfxhub_client_ids[cid],
                        cid);
        } else {
                switch (adev->asic_type) {
                case CHIP_VEGA10:
                        mmhub_cid = mmhub_client_ids_vega10[cid][rw];
                        break;
                case CHIP_VEGA12:
                        mmhub_cid = mmhub_client_ids_vega12[cid][rw];
                        break;
                case CHIP_VEGA20:
                        mmhub_cid = mmhub_client_ids_vega20[cid][rw];
                        break;
                case CHIP_ARCTURUS:
                        mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
                        break;
                case CHIP_RAVEN:
                        mmhub_cid = mmhub_client_ids_raven[cid][rw];
                        break;
                case CHIP_RENOIR:
                        mmhub_cid = mmhub_client_ids_renoir[cid][rw];
                        break;
                default:
                        mmhub_cid = NULL;
                        break;
                }
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        mmhub_cid ? mmhub_cid : "unknown", cid);
        }
        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
        dev_err(adev->dev, "\t RW: 0x%x\n", rw);
        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
}

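/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * @vmid: vm instance to invalidate
 * @flush_type: the flush type to use
 *
 * Returns a request value that invalidates all L1 and L2 PTE and
 * PDE cache levels for the given VMID.
 */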
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
                                        uint32_t flush_type)
{
        u32 req = 0;

        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - check whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                       uint32_t vmhub)
{
        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)) &&
                (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
                   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

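/**
 * gmc_v9_0_get_atc_vmid_pasid_mapping_info - query the VMID-PASID mapping
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to look up
 * @p_pasid: returns the pasid mapped to @vmid
 *
 * Returns true if the ATC mapping for @vmid is valid.
 */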
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
                     + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 covers the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - TLB flush with a certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned eng = 17;
        u32 j, inv_req, inv_req2, tmp;
        struct amdgpu_vmhub *hub;

        BUG_ON(vmhub >= adev->num_vmhubs);

        hub = &adev->vmhub[vmhub];
        if (adev->gmc.xgmi.num_physical_nodes &&
            adev->asic_type == CHIP_VEGA20) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
                inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
        } else {
                inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
                inv_req2 = 0;
        }

        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
        if (adev->gfx.kiq.ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_sem)) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
                up_read(&adev->reset_sem);
                return;
        }

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across a
         * power-gating off cycle. Add a semaphore acquire before invalidation
         * and a semaphore release after invalidation to avoid entering the
         * power-gated state and work around the issue.
         */

        /* TODO: Semaphore usage still needs to be debugged for the GFXHUB as well. */
        if (use_semaphore) {
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquire */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                                            hub->eng_distance * eng);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (j >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        do {
                WREG32_NO_KIQ(hub->vm_inv_eng0_req +
                              hub->eng_distance * eng, inv_req);

                /*
                 * Issue a dummy read to wait for the ACK register to
                 * be cleared to avoid a false ACK due to the new fast
                 * GRBM interface.
                 */
                if (vmhub == AMDGPU_GFXHUB_0)
                        RREG32_NO_KIQ(hub->vm_inv_eng0_req +
                                      hub->eng_distance * eng);

                for (j = 0; j < adev->usec_timeout; j++) {
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng);
                        if (tmp & (1 << vmid))
                                break;
                        udelay(1);
                }

                inv_req = inv_req2;
                inv_req2 = 0;
        } while (inv_req);

        /* TODO: Semaphore usage still needs to be debugged for the GFXHUB as well. */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                              hub->eng_distance * eng, 0);

        spin_unlock(&adev->gmc.invalidate_lock);

        if (j < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - TLB flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                        uint16_t pasid, uint32_t flush_type,
                                        bool all_hub)
{
        int vmid, i;
        signed long r;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        if (amdgpu_in_reset(adev))
                return -EIO;

        if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
                                       adev->asic_type == CHIP_VEGA20);
                /* 2 dwords flush + 8 dwords fence */
                unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

                if (vega20_xgmi_wa)
                        ndw += kiq->pmf->invalidate_tlbs_size;

                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, ndw);
                if (vega20_xgmi_wa)
                        kiq->pmf->kiq_invalidate_tlbs(ring,
                                                      pasid, 2, all_hub);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                        pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        up_read(&adev->reset_sem);
                        return -ETIME;
                }

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        up_read(&adev->reset_sem);
                        return -ETIME;
                }
                up_read(&adev->reset_sem);
                return 0;
        }

        for (vmid = 1; vmid < 16; vmid++) {

                ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
                                &queried_pasid);
                if (ret && queried_pasid == pasid) {
                        if (all_hub) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                        i, flush_type);
                        } else {
                                gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                AMDGPU_GFXHUB_0, flush_type);
                        }
                        break;
                }
        }

        return 0;

}

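/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: vm instance to flush
 * @pd_addr: page directory base address
 *
 * Updates the page table base address for @vmid and emits a TLB
 * invalidation request, framed by a semaphore acquire/release on
 * the hubs that need it. Returns @pd_addr.
 */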
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across a
         * power-gating off cycle. Add a semaphore acquire before invalidation
         * and a semaphore release after invalidation to avoid entering the
         * power-gated state and work around the issue.
         */

        /* TODO: Semaphore usage still needs to be debugged for the GFXHUB as well. */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquire */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: Semaphore usage still needs to be debugged for the GFXHUB as well. */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}

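/**
 * gmc_v9_0_emit_pasid_mapping - update the VMID to PASID LUT
 *
 * @ring: ring to emit the write on
 * @vmid: vm instance to map
 * @pasid: pasid to associate with @vmid
 */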
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                        unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* Do nothing because there's no lut register for mmhub1. */
        if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
                return;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

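/**
 * gmc_v9_0_map_mtype - map a UAPI memory type to a VEGA10 PTE mtype
 *
 * @adev: amdgpu_device pointer
 * @flags: the AMDGPU_VM_MTYPE_* flag to translate
 *
 * Unknown or default types fall back to MTYPE_NC.
 */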
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_RW:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        }
}

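/**
 * gmc_v9_0_get_vm_pde - adjust the address and flags of a PDE
 *
 * @adev: amdgpu_device pointer
 * @level: page table level this entry belongs to
 * @addr: address of the child page table or page, adjusted in place
 * @flags: PDE flags, adjusted in place
 */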
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

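/**
 * gmc_v9_0_get_vm_pte - compute the PTE flags for a mapping
 *
 * @adev: amdgpu_device pointer
 * @mapping: the BO VA mapping the PTEs belong to
 * @flags: PTE flags, adjusted in place
 */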
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
        *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (adev->asic_type == CHIP_ARCTURUS &&
            !(*flags & AMDGPU_PTE_SYSTEM) &&
            mapping->bo_va->is_xgmi)
                *flags |= AMDGPU_PTE_SNOOPED;
}

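/**
 * gmc_v9_0_get_vbios_fb_size - size of the framebuffer reserved by the VBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * Derived from the active VGA allocation or from the primary display
 * viewport dimensions, assuming 4 bytes per pixel.
 */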
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        unsigned size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport;

                switch (adev->asic_type) {
                case CHIP_RAVEN:
                case CHIP_RENOIR:
                        viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                        size = (REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                                4);
                        break;
                case CHIP_VEGA10:
                case CHIP_VEGA12:
                case CHIP_VEGA20:
                default:
                        viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
                        size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                                4);
                        break;
                }
        }

        return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
        .get_vm_pde = gmc_v9_0_get_vm_pde,
        .get_vm_pte = gmc_v9_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->umc.funcs = &umc_v6_0_funcs;
                break;
        case CHIP_VEGA20:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        default:
                break;
        }
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_ARCTURUS:
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
                break;
        default:
                adev->mmhub.funcs = &mmhub_v1_0_funcs;
                break;
        }
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
        gmc_v9_0_set_gfxhub_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;

        /*
         * Workaround for a performance drop issue when the VBIOS enables
         * partial writes but disables HBM ECC for Vega10.
         */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
                if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                }
        }

        if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
                adev->mmhub.funcs->reset_ras_error_count(adev);

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        if (!amdgpu_sriov_vf(adev))
                base = adev->mmhub.funcs->get_fb_location(adev);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);
        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* the nbio block reports the vram size in MB; convert to bytes */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
                adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif
        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_VEGA10:  /* all engines support GPUVM */
                case CHIP_VEGA12:  /* all engines support GPUVM */
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                case CHIP_RAVEN:   /* DCE SG support */
                case CHIP_RENOIR:
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                 AMDGPU_PTE_EXECUTABLE;
        return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v9_0_save_registers - save registers
 *
 * @adev: amdgpu_device pointer
 *
 * This saves register values that may need to be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_RAVEN)
                adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

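/**
 * gmc_v9_0_sw_init - GMC software init
 *
 * @handle: amdgpu_device pointer cast to void *
 *
 * Determines the VRAM configuration, sets up the VM address space,
 * registers the fault and ECC interrupt sources, programs the DMA
 * mask and initializes the memory manager and GART.
 */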
static int gmc_v9_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->gfxhub.funcs->init(adev);

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        r = amdgpu_atomfirmware_get_vram_info(adev,
                &vram_width, &vram_type, &vram_vendor);
        if (amdgpu_sriov_vf(adev))
                /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
                 * RAVEN, and the DF-related registers are not readable;
                 * hardcoding seems to be the only way to set the correct
                 * vram_width
                 */
                adev->gmc.vram_width = 2048;
        else if (amdgpu_emu_mode != 1)
                adev->gmc.vram_width = vram_width;

        if (!adev->gmc.vram_width) {
                int chansize, numchan;

                /* hbm memory channel size */
                if (adev->flags & AMD_IS_APU)
                        chansize = 64;
                else
                        chansize = 128;

                numchan = adev->df.funcs->get_hbm_channel_number(adev);
                adev->gmc.vram_width = numchan * chansize;
        }

        adev->gmc.vram_type = vram_type;
        adev->gmc.vram_vendor = vram_vendor;
        switch (adev->asic_type) {
        case CHIP_RAVEN:
                adev->num_vmhubs = 2;

                if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                } else {
                        /* vm_size is 128TB + 512GB for legacy 3-level page support */
                        amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
                        adev->gmc.translate_further =
                                adev->vm_manager.num_level > 1;
                }
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RENOIR:
                adev->num_vmhubs = 2;

                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Vega10,
                 * block size 512 (9bit)
                 */
                /* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
                if (amdgpu_sriov_vf(adev))
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
                else
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        case CHIP_ARCTURUS:
                adev->num_vmhubs = 3;

                /* Keep the vm size the same as Vega20 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        default:
                break;
        }

        /* This interrupt is the VMC page fault. */
1398         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1399                                 &adev->gmc.vm_fault);
1400         if (r)
1401                 return r;
1402
1403         if (adev->asic_type == CHIP_ARCTURUS) {
1404                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1405                                         &adev->gmc.vm_fault);
1406                 if (r)
1407                         return r;
1408         }
1409
1410         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1411                                 &adev->gmc.vm_fault);
1412
1413         if (r)
1414                 return r;
1415
1416         if (!amdgpu_sriov_vf(adev)) {
1417                 /* interrupt sent to DF. */
1418                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1419                                       &adev->gmc.ecc_irq);
1420                 if (r)
1421                         return r;
1422         }
1423
1424         /* Set the internal MC address mask
1425          * This is the max address of the GPU's
1426          * internal address space.
1427          */
1428         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1429
1430         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1431         if (r) {
1432                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1433                 return r;
1434         }
1435         adev->need_swiotlb = drm_need_swiotlb(44);

        if (adev->gmc.xgmi.supported) {
                r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
                        return r;
        }

        r = gmc_v9_0_mc_init(adev);
        if (r)
                return r;

        amdgpu_gmc_get_vbios_allocations(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v9_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1..n-1
         * amdkfd will use VMIDs n..15
         *
         * The first KFD VMID is 8 for GPUs with graphics, 3 for
         * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
         * for video processing.
         */
        adev->vm_manager.first_kfd_vmid =
                adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
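
        /*
         * Concretely (a sketch derived from the values above, not
         * authoritative): on Vega/Raven VMID 0 is the system VMID, 1-7 go
         * to amdgpu graphics/compute and 8-15 to amdkfd; on Arcturus amdgpu
         * keeps only VMIDs 1-2, which end up serving video processing on
         * those compute-only parts, and amdkfd gets 3-15.
         */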

        amdgpu_vm_manager_init(adev);

        gmc_v9_0_save_registers(adev);

        return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_gmc_ras_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
        amdgpu_gart_fini(adev);

        return 0;
}

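/*
 * Program "golden" register settings: per-ASIC register values that differ
 * from the hardware reset defaults (here for the MMHUB and ATHUB blocks;
 * HDP gets its golden sequence from gmc_v9_0_hw_init() below).
 */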
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                if (amdgpu_sriov_vf(adev))
                        break;
                fallthrough;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_mmhub_1_0_0,
                                                ARRAY_SIZE(golden_settings_mmhub_1_0_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        case CHIP_VEGA12:
                break;
        case CHIP_RAVEN:
                /* TODO for renoir */
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        default:
                break;
        }
}

/**
 * gmc_v9_0_restore_registers - restore saved registers
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_RAVEN) {
                WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
                WARN_ON(adev->gmc.sdpif_register !=
                        RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
        }
}
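
/*
 * Pairing note: gmc_v9_0_save_registers() is called from gmc_v9_0_sw_init()
 * above. Presumably the DCHUBBUB_SDPIF register does not survive suspend on
 * Raven, hence this explicit restore and the WARN_ON readback check.
 */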

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;

        r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
        adev->gart.ready = true;
        return 0;
}
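
/*
 * Design note (my characterization, not authoritative): both the gfxhub
 * (graphics/compute clients) and the mmhub (multimedia and system clients)
 * are pointed at the same GART table, which is why gmc_v9_0_gart_enable()
 * pins the table once and then enables each hub in turn.
 */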

static int gmc_v9_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool value;
        int r, i;
        u32 tmp;

        /* Program golden registers first; the GART setup below depends on them. */
        gmc_v9_0_init_golden_registers(adev);

        if (adev->mode_info.num_crtc &&
            adev->asic_type != CHIP_ARCTURUS) {
                /* Lockout access through the VGA aperture */
                WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

                /* Disable VGA render */
                WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        }

        amdgpu_device_program_register_sequence(adev,
                                                golden_settings_vega10_hdp,
                                                ARRAY_SIZE(golden_settings_vega10_hdp));

        if (adev->mmhub.funcs->update_power_gating)
                adev->mmhub.funcs->update_power_gating(adev, true);

        switch (adev->asic_type) {
        case CHIP_ARCTURUS:
                WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
                break;
        default:
                break;
        }

        WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

        /* Write HOST_PATH_CNTL back unchanged; apparently only the write itself matters. */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
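
        /*
         * My reading of the two writes above: the HDP nonsurface base is
         * programmed in 256-byte units, so the low register takes
         * vram_start >> 8 (address bits 8-39) and the HI register the
         * bits from 40 up.
         */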

        /* After HDP is initialized, flush HDP. */
        adev->nbio.funcs->hdp_flush(adev, NULL);

        value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);

        if (!amdgpu_sriov_vf(adev)) {
                adev->gfxhub.funcs->set_fault_enable_default(adev, value);
                adev->mmhub.funcs->set_fault_enable_default(adev, value);
        }
        for (i = 0; i < adev->num_vmhubs; ++i)
                gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
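
        /*
         * Flush VMID 0 on every VM hub: VMID 0 holds the system/GART
         * mappings set up here, and (as I read it) this clears any stale
         * translations cached before the hubs were programmed.
         */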

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        r = gmc_v9_0_gart_enable(adev);

        return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v9_0_gart_disable(adev);

        return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v9_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v9. */
        return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v9. */
        return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
        /* XXX for emulation. */
        return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->set_clockgating(adev, state);

        athub_v1_0_set_clockgating(adev, state);

        return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->get_clockgating(adev, flags);

        athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .name = "gmc_v9_0",
        .early_init = gmc_v9_0_early_init,
        .late_init = gmc_v9_0_late_init,
        .sw_init = gmc_v9_0_sw_init,
        .sw_fini = gmc_v9_0_sw_fini,
        .hw_init = gmc_v9_0_hw_init,
        .hw_fini = gmc_v9_0_hw_fini,
        .suspend = gmc_v9_0_suspend,
        .resume = gmc_v9_0_resume,
        .is_idle = gmc_v9_0_is_idle,
        .wait_for_idle = gmc_v9_0_wait_for_idle,
        .soft_reset = gmc_v9_0_soft_reset,
        .set_clockgating_state = gmc_v9_0_set_clockgating_state,
        .set_powergating_state = gmc_v9_0_set_powergating_state,
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v9_0_ip_funcs,
};