/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                         0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                0
#define mmTCP_CHAN_STEER_1_ARCT                         0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                0
#define mmTCP_CHAN_STEER_2_ARCT                         0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                0
#define mmTCP_CHAN_STEER_3_ARCT                         0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                0
#define mmTCP_CHAN_STEER_4_ARCT                         0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                0
#define mmTCP_CHAN_STEER_5_ARCT                         0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                0

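/*
 * RAS sub-block index space shared with the RAS TA.  Each GFX block owns a
 * contiguous range delimited by *_INDEX_START/*_INDEX_END markers so a
 * single index can address any sub-block.
 */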
enum ta_ras_gfx_subblock {
        /*CPC*/
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF*/
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG*/
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS*/
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI*/
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges)*/
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0*/
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1*/
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2*/
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA*/
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA*/
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges)*/
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0*/
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1*/
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2*/
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3*/
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4*/
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI*/
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP*/
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD*/
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges)*/
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0*/
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1*/
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2*/
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank*/
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker*/
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

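/*
 * Pack the per-sub-block capability flags into the hw/sw "supported error
 * type" bitmasks: a..d fill hw bits 0..3, and g, e, h, f fill sw bits 0..3.
 * The bit positions appear to follow the AMDGPU_RAS_ERROR__* encoding
 * (parity, single correctable, multi uncorrectable, poison).
 */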
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                 \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

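/*
 * Golden register settings.  Each SOC15_REG_GOLDEN_VALUE() entry names a
 * register, the mask of bits to touch and the value to program;
 * soc15_program_register_sequence() applies them as masked read-modify-write
 * updates.
 */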
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
        {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
        {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

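/* Dword offsets of the eight RLC_SRM_INDEX_CNTL address/data register pairs,
 * relative to the first instance.
 */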
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

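/*
 * Write a register via the RLC when running under SR-IOV full access.
 * GRBM_GFX_CNTL/GRBM_GFX_INDEX are shadowed to SCRATCH_REG2/3 and then
 * written directly; any other register is handed to the RLC firmware:
 * the value goes in SCRATCH_REG0, the offset with bit 31 set as a "pending"
 * flag goes in SCRATCH_REG1, RLC_SPARE_INT is rung, and we poll until the
 * RLC clears the flag.
 */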
static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
        static void *scratch_reg0;
        static void *scratch_reg1;
        static void *scratch_reg2;
        static void *scratch_reg3;
        static void *spare_int;
        static uint32_t grbm_cntl;
        static uint32_t grbm_idx;

        scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
        scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
        scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
        scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
        spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

        if (amdgpu_sriov_runtime(adev)) {
                pr_err("shouldn't call rlcg write register during runtime\n");
                return;
        }

        if (offset == grbm_cntl || offset == grbm_idx) {
                if (offset == grbm_cntl)
                        writel(v, scratch_reg2);
                else if (offset == grbm_idx)
                        writel(v, scratch_reg3);

                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                uint32_t i = 0;
                uint32_t retries = 50000;

                writel(v, scratch_reg0);
                writel(offset | 0x80000000, scratch_reg1);
                writel(1, spare_int);
                for (i = 0; i < retries; i++) {
                        u32 tmp;

                        tmp = readl(scratch_reg1);
                        if (!(tmp & 0x80000000))
                                break;

                        udelay(10);
                }
                if (i >= retries)
                        pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
        }
}

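/* RLC-aware register write: under SR-IOV full access, RLC-owned registers
 * (AMDGPU_REGS_RLC) go through the scratch-register handshake above;
 * everything else is a plain (or no-KIQ) MMIO write.
 */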
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
                               u32 v, u32 acc_flags, u32 hwip)
{
        if ((acc_flags & AMDGPU_REGS_RLC) &&
            amdgpu_sriov_fullaccess(adev)) {
                gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);

                return;
        }

        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                WREG32_NO_KIQ(offset, v);
        else
                WREG32(offset, v);
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

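/* Emit a PM4 SET_RESOURCES packet handing the KIQ the bitmask of compute
 * queues it may schedule.
 */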
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring,
                PACKET3_SET_RESOURCES_VMID_MASK(0) |
                /* vmid_mask:0* queue_type:0 (KIQ) */
                PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
        amdgpu_ring_write(kiq_ring,
                        lower_32_bits(queue_mask));     /* queue mask lo */
        amdgpu_ring_write(kiq_ring,
                        upper_32_bits(queue_mask));     /* queue mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}

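/* Emit a PM4 MAP_QUEUES packet asking the KIQ to map one ring's MQD and
 * activate its doorbell; eng_sel 4 selects the gfx engine, 0 compute.
 */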
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                         PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                         PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                         PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                         PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                         PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                         /*queue_type: normal compute queue */
                         PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                         /* alloc format: all_on_one_pipe */
                         PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                         PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                         /* num_queues: must be 1 */
                         PACKET3_MAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

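/* Emit a PM4 UNMAP_QUEUES packet for one ring.  For
 * PREEMPT_QUEUES_NO_UNMAP the trailing dwords pass an address/seq pair,
 * presumably signalled by the KIQ once preemption completes.
 */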
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, seq);
        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}

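/* Emit a PM4 QUERY_STATUS packet for the queue behind ring's doorbell; the
 * 64-bit seq written back to addr serves as the completion fence.
 */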
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
        amdgpu_ring_write(kiq_ring,
                        PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                        PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

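/* Emit a PM4 INVALIDATE_TLBS packet to flush the GPU VM TLB entries
 * matching pasid, optionally on all VM hubs.
 */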
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                uint16_t pasid, uint32_t flush_type,
                                bool all_hub)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

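/* The *_size fields are the dword lengths (header included) of the packets
 * built above; callers use them to size their KIQ ring allocations.
 */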
936 static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
937         .kiq_set_resources = gfx_v9_0_kiq_set_resources,
938         .kiq_map_queues = gfx_v9_0_kiq_map_queues,
939         .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
940         .kiq_query_status = gfx_v9_0_kiq_query_status,
941         .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
942         .set_resources_size = 8,
943         .map_queues_size = 7,
944         .unmap_queues_size = 6,
945         .query_status_size = 7,
946         .invalidate_tlbs_size = 2,
947 };
948
949 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
950 {
951         adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
952 }
953
954 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
955 {
956         switch (adev->asic_type) {
957         case CHIP_VEGA10:
958                 soc15_program_register_sequence(adev,
959                                                 golden_settings_gc_9_0,
960                                                 ARRAY_SIZE(golden_settings_gc_9_0));
961                 soc15_program_register_sequence(adev,
962                                                 golden_settings_gc_9_0_vg10,
963                                                 ARRAY_SIZE(golden_settings_gc_9_0_vg10));
964                 break;
965         case CHIP_VEGA12:
966                 soc15_program_register_sequence(adev,
967                                                 golden_settings_gc_9_2_1,
968                                                 ARRAY_SIZE(golden_settings_gc_9_2_1));
969                 soc15_program_register_sequence(adev,
970                                                 golden_settings_gc_9_2_1_vg12,
971                                                 ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
972                 break;
973         case CHIP_VEGA20:
974                 soc15_program_register_sequence(adev,
975                                                 golden_settings_gc_9_0,
976                                                 ARRAY_SIZE(golden_settings_gc_9_0));
977                 soc15_program_register_sequence(adev,
978                                                 golden_settings_gc_9_0_vg20,
979                                                 ARRAY_SIZE(golden_settings_gc_9_0_vg20));
980                 break;
981         case CHIP_ARCTURUS:
982                 soc15_program_register_sequence(adev,
983                                                 golden_settings_gc_9_4_1_arct,
984                                                 ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
985                 break;
986         case CHIP_RAVEN:
987                 soc15_program_register_sequence(adev, golden_settings_gc_9_1,
988                                                 ARRAY_SIZE(golden_settings_gc_9_1));
989                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
990                         soc15_program_register_sequence(adev,
991                                                         golden_settings_gc_9_1_rv2,
992                                                         ARRAY_SIZE(golden_settings_gc_9_1_rv2));
993                 else
994                         soc15_program_register_sequence(adev,
995                                                         golden_settings_gc_9_1_rv1,
996                                                         ARRAY_SIZE(golden_settings_gc_9_1_rv1));
997                 break;
998         case CHIP_RENOIR:
999                 soc15_program_register_sequence(adev,
1000                                                 golden_settings_gc_9_1_rn,
1001                                                 ARRAY_SIZE(golden_settings_gc_9_1_rn));
1002                 return; /* Renoir doesn't need the common golden settings */
1003         case CHIP_ALDEBARAN:
1004                 gfx_v9_4_2_init_golden_registers(adev,
1005                                                  adev->smuio.funcs->get_die_id(adev));
1006                 break;
1007         default:
1008                 break;
1009         }
1010
1011         if ((adev->asic_type != CHIP_ARCTURUS) &&
1012             (adev->asic_type != CHIP_ALDEBARAN))
1013                 soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
1014                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
1015 }
1016
1017 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
1018 {
1019         adev->gfx.scratch.num_reg = 8;
1020         adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1021         adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
1022 }
1023
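     /*
      * Emit a 5-DWORD PACKET3_WRITE_DATA that writes @val into the
      * register at offset @reg; @wc requests a write confirm and
      * @eng_sel selects the CP engine performing the write.
      *
      * Hypothetical usage sketch (reg/val stand in for a real
      * register offset and value):
      *   gfx_v9_0_write_data_to_reg(ring, 0, true, reg, val);
      */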
1024 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1025                                        bool wc, uint32_t reg, uint32_t val)
1026 {
1027         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1028         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1029                                 WRITE_DATA_DST_SEL(0) |
1030                                 (wc ? WR_CONFIRM : 0));
1031         amdgpu_ring_write(ring, reg);
1032         amdgpu_ring_write(ring, 0);
1033         amdgpu_ring_write(ring, val);
1034 }
1035
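     /*
      * Emit a PACKET3_WAIT_REG_MEM that polls a register (@mem_space
      * == 0) or a memory location (@mem_space == 1) until
      * (value & @mask) == @ref; the FUNCTION field is hard-coded to 3
      * (equal) and @inv sets the poll interval.  A hypothetical wait
      * on a dword in memory becoming 1 might look like:
      *   gfx_v9_0_wait_reg_mem(ring, 0, 1, 0, lower_32_bits(addr),
      *                         upper_32_bits(addr), 1, 0xffffffff, 0x20);
      */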
1036 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1037                                   int mem_space, int opt, uint32_t addr0,
1038                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1039                                   uint32_t inv)
1040 {
1041         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1042         amdgpu_ring_write(ring,
1043                                  /* memory (1) or register (0) */
1044                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1045                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1046                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1047                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1048
1049         if (mem_space)
1050                 BUG_ON(addr0 & 0x3); /* Dword align */
1051         amdgpu_ring_write(ring, addr0);
1052         amdgpu_ring_write(ring, addr1);
1053         amdgpu_ring_write(ring, ref);
1054         amdgpu_ring_write(ring, mask);
1055         amdgpu_ring_write(ring, inv); /* poll interval */
1056 }
1057
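     /*
      * Ring smoke test: seed a scratch register with 0xCAFEDEAD, ask
      * the CP to overwrite it with 0xDEADBEEF via SET_UCONFIG_REG,
      * then poll up to adev->usec_timeout microseconds for the new
      * value to land.
      */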
1058 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1059 {
1060         struct amdgpu_device *adev = ring->adev;
1061         uint32_t scratch;
1062         uint32_t tmp = 0;
1063         unsigned i;
1064         int r;
1065
1066         r = amdgpu_gfx_scratch_get(adev, &scratch);
1067         if (r)
1068                 return r;
1069
1070         WREG32(scratch, 0xCAFEDEAD);
1071         r = amdgpu_ring_alloc(ring, 3);
1072         if (r)
1073                 goto error_free_scratch;
1074
1075         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1076         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1077         amdgpu_ring_write(ring, 0xDEADBEEF);
1078         amdgpu_ring_commit(ring);
1079
1080         for (i = 0; i < adev->usec_timeout; i++) {
1081                 tmp = RREG32(scratch);
1082                 if (tmp == 0xDEADBEEF)
1083                         break;
1084                 udelay(1);
1085         }
1086
1087         if (i >= adev->usec_timeout)
1088                 r = -ETIMEDOUT;
1089
1090 error_free_scratch:
1091         amdgpu_gfx_scratch_free(adev, scratch);
1092         return r;
1093 }
1094
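     /*
      * Indirect-buffer test: build a five-DWORD IB that WRITE_DATAs
      * 0xDEADBEEF into a writeback slot pre-seeded with 0xCAFEDEAD,
      * schedule it, wait on its fence, and verify the slot changed.
      */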
1095 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1096 {
1097         struct amdgpu_device *adev = ring->adev;
1098         struct amdgpu_ib ib;
1099         struct dma_fence *f = NULL;
1100
1101         unsigned index;
1102         uint64_t gpu_addr;
1103         uint32_t tmp;
1104         long r;
1105
1106         r = amdgpu_device_wb_get(adev, &index);
1107         if (r)
1108                 return r;
1109
1110         gpu_addr = adev->wb.gpu_addr + (index * 4);
1111         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1112         memset(&ib, 0, sizeof(ib));
1113         r = amdgpu_ib_get(adev, NULL, 16,
1114                           AMDGPU_IB_POOL_DIRECT, &ib);
1115         if (r)
1116                 goto err1;
1117
1118         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1119         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1120         ib.ptr[2] = lower_32_bits(gpu_addr);
1121         ib.ptr[3] = upper_32_bits(gpu_addr);
1122         ib.ptr[4] = 0xDEADBEEF;
1123         ib.length_dw = 5;
1124
1125         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1126         if (r)
1127                 goto err2;
1128
1129         r = dma_fence_wait_timeout(f, false, timeout);
1130         if (r == 0) {
1131                 r = -ETIMEDOUT;
1132                 goto err2;
1133         } else if (r < 0) {
1134                 goto err2;
1135         }
1136
1137         tmp = adev->wb.wb[index];
1138         if (tmp == 0xDEADBEEF)
1139                 r = 0;
1140         else
1141                 r = -EINVAL;
1142
1143 err2:
1144         amdgpu_ib_free(adev, &ib, NULL);
1145         dma_fence_put(f);
1146 err1:
1147         amdgpu_device_wb_free(adev, index);
1148         return r;
1149 }
1150
1152 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1153 {
1154         release_firmware(adev->gfx.pfp_fw);
1155         adev->gfx.pfp_fw = NULL;
1156         release_firmware(adev->gfx.me_fw);
1157         adev->gfx.me_fw = NULL;
1158         release_firmware(adev->gfx.ce_fw);
1159         adev->gfx.ce_fw = NULL;
1160         release_firmware(adev->gfx.rlc_fw);
1161         adev->gfx.rlc_fw = NULL;
1162         release_firmware(adev->gfx.mec_fw);
1163         adev->gfx.mec_fw = NULL;
1164         release_firmware(adev->gfx.mec2_fw);
1165         adev->gfx.mec2_fw = NULL;
1166
1167         kfree(adev->gfx.rlc.register_list_format);
1168 }
1169
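     /*
      * Parse the v2.1 RLC header extensions: ucode/feature versions
      * plus the sizes and in-image offsets of the three save/restore
      * lists (CNTL, GPM and SRM).
      */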
1170 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1171 {
1172         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1173
1174         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1175         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1176         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1177         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1178         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1179         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1180         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1181         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1182         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1183         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1184         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1185         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1186         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1187         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1188                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1189 }
1190
1191 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1192 {
1193         adev->gfx.me_fw_write_wait = false;
1194         adev->gfx.mec_fw_write_wait = false;
1195
1196         if ((adev->asic_type != CHIP_ARCTURUS) &&
1197             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1198             (adev->gfx.mec_feature_version < 46) ||
1199             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1200             (adev->gfx.pfp_feature_version < 46)))
1201                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1202
1203         switch (adev->asic_type) {
1204         case CHIP_VEGA10:
1205                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1206                     (adev->gfx.me_feature_version >= 42) &&
1207                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1208                     (adev->gfx.pfp_feature_version >= 42))
1209                         adev->gfx.me_fw_write_wait = true;
1210
1211                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1212                     (adev->gfx.mec_feature_version >= 42))
1213                         adev->gfx.mec_fw_write_wait = true;
1214                 break;
1215         case CHIP_VEGA12:
1216                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1217                     (adev->gfx.me_feature_version >= 44) &&
1218                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1219                     (adev->gfx.pfp_feature_version >= 44))
1220                         adev->gfx.me_fw_write_wait = true;
1221
1222                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1223                     (adev->gfx.mec_feature_version >= 44))
1224                         adev->gfx.mec_fw_write_wait = true;
1225                 break;
1226         case CHIP_VEGA20:
1227                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1228                     (adev->gfx.me_feature_version >= 44) &&
1229                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1230                     (adev->gfx.pfp_feature_version >= 44))
1231                         adev->gfx.me_fw_write_wait = true;
1232
1233                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1234                     (adev->gfx.mec_feature_version >= 44))
1235                         adev->gfx.mec_fw_write_wait = true;
1236                 break;
1237         case CHIP_RAVEN:
1238                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1239                     (adev->gfx.me_feature_version >= 42) &&
1240                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1241                     (adev->gfx.pfp_feature_version >= 42))
1242                         adev->gfx.me_fw_write_wait = true;
1243
1244                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1245                     (adev->gfx.mec_feature_version >= 42))
1246                         adev->gfx.mec_fw_write_wait = true;
1247                 break;
1248         default:
1249                 adev->gfx.me_fw_write_wait = true;
1250                 adev->gfx.mec_fw_write_wait = true;
1251                 break;
1252         }
1253 }
1254
1255 struct amdgpu_gfxoff_quirk {
1256         u16 chip_vendor;
1257         u16 chip_device;
1258         u16 subsys_vendor;
1259         u16 subsys_device;
1260         u8 revision;
1261 };
1262
1263 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1264         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1265         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1266         /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1267         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1268         /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1269         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1270         { 0, 0, 0, 0, 0 },
1271 };
1272
1273 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1274 {
1275         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1276
1277         while (p && p->chip_device != 0) {
1278                 if (pdev->vendor == p->chip_vendor &&
1279                     pdev->device == p->chip_device &&
1280                     pdev->subsystem_vendor == p->subsys_vendor &&
1281                     pdev->subsystem_device == p->subsys_device &&
1282                     pdev->revision == p->revision) {
1283                         return true;
1284                 }
1285                 ++p;
1286         }
1287         return false;
1288 }
1289
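     /*
      * "Kicker" Raven parts are presumed to be identifiable by their
      * SMU firmware version (anything >= 0x41e2b).
      */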
1290 static bool is_raven_kicker(struct amdgpu_device *adev)
1291 {
1292         return adev->pm.fw_version >= 0x41e2b;
1296 }
1297
1298 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1299 {
1300         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1301                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1302
1303         switch (adev->asic_type) {
1304         case CHIP_VEGA10:
1305         case CHIP_VEGA12:
1306         case CHIP_VEGA20:
1307                 break;
1308         case CHIP_RAVEN:
1309                 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1310                       (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1311                     ((!is_raven_kicker(adev) &&
1312                       adev->gfx.rlc_fw_version < 531) ||
1313                      (adev->gfx.rlc_feature_version < 1) ||
1314                      !adev->gfx.rlc.is_rlc_v2_1))
1315                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1316
1317                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1318                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1319                                 AMD_PG_SUPPORT_CP |
1320                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1321                 break;
1322         case CHIP_RENOIR:
1323                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1324                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1325                                 AMD_PG_SUPPORT_CP |
1326                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1327                 break;
1328         default:
1329                 break;
1330         }
1331 }
1332
1333 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1334                                           const char *chip_name)
1335 {
1336         char fw_name[30];
1337         int err;
1338         struct amdgpu_firmware_info *info = NULL;
1339         const struct common_firmware_header *header = NULL;
1340         const struct gfx_firmware_header_v1_0 *cp_hdr;
1341
1342         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1343         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1344         if (err)
1345                 goto out;
1346         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1347         if (err)
1348                 goto out;
1349         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1350         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1351         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1352
1353         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1354         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1355         if (err)
1356                 goto out;
1357         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1358         if (err)
1359                 goto out;
1360         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1361         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1362         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1363
1364         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1365         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1366         if (err)
1367                 goto out;
1368         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1369         if (err)
1370                 goto out;
1371         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1372         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1373         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1374
1375         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1376                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1377                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1378                 info->fw = adev->gfx.pfp_fw;
1379                 header = (const struct common_firmware_header *)info->fw->data;
1380                 adev->firmware.fw_size +=
1381                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1382
1383                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1384                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1385                 info->fw = adev->gfx.me_fw;
1386                 header = (const struct common_firmware_header *)info->fw->data;
1387                 adev->firmware.fw_size +=
1388                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1389
1390                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1391                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1392                 info->fw = adev->gfx.ce_fw;
1393                 header = (const struct common_firmware_header *)info->fw->data;
1394                 adev->firmware.fw_size +=
1395                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1396         }
1397
1398 out:
1399         if (err) {
1400                 dev_err(adev->dev,
1401                         "gfx9: Failed to load firmware \"%s\"\n",
1402                         fw_name);
1403                 release_firmware(adev->gfx.pfp_fw);
1404                 adev->gfx.pfp_fw = NULL;
1405                 release_firmware(adev->gfx.me_fw);
1406                 adev->gfx.me_fw = NULL;
1407                 release_firmware(adev->gfx.ce_fw);
1408                 adev->gfx.ce_fw = NULL;
1409         }
1410         return err;
1411 }
1412
1413 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1414                                           const char *chip_name)
1415 {
1416         char fw_name[30];
1417         int err;
1418         struct amdgpu_firmware_info *info = NULL;
1419         const struct common_firmware_header *header = NULL;
1420         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1421         unsigned int *tmp = NULL;
1422         unsigned int i = 0;
1423         uint16_t version_major;
1424         uint16_t version_minor;
1425         uint32_t smu_version;
1426
1427         /*
1428          * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
1429          * instead of picasso_rlc.bin.
1430          * Detection: PCO AM4 parts have PCI revision 0xC8-0xCF or
1431          *            0xD8-0xDF; anything else is PCO FP5.
1432          */
1435         if (!strcmp(chip_name, "picasso") &&
1436                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1437                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1438                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1439         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1440                 (smu_version >= 0x41e2b))
1441                 /*
1442                  * SMC is loaded by the SBIOS on APUs, so the SMU version
1443                  * can be queried directly.
1444                  */
1444                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1445         else
1446                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1447         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1448         if (err)
1449                 goto out;
1450         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
             if (err)
                     goto out;
1451         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1452
1453         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1454         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1455         if (version_major == 2 && version_minor == 1)
1456                 adev->gfx.rlc.is_rlc_v2_1 = true;
1457
1458         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1459         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1460         adev->gfx.rlc.save_and_restore_offset =
1461                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1462         adev->gfx.rlc.clear_state_descriptor_offset =
1463                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1464         adev->gfx.rlc.avail_scratch_ram_locations =
1465                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1466         adev->gfx.rlc.reg_restore_list_size =
1467                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1468         adev->gfx.rlc.reg_list_format_start =
1469                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1470         adev->gfx.rlc.reg_list_format_separate_start =
1471                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1472         adev->gfx.rlc.starting_offsets_start =
1473                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1474         adev->gfx.rlc.reg_list_format_size_bytes =
1475                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1476         adev->gfx.rlc.reg_list_size_bytes =
1477                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1478         adev->gfx.rlc.register_list_format =
1479                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1480                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1481         if (!adev->gfx.rlc.register_list_format) {
1482                 err = -ENOMEM;
1483                 goto out;
1484         }
1485
1486         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1487                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1488         for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1489                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1490
1491         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1492
1493         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1494                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1495         for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1496                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1497
1498         if (adev->gfx.rlc.is_rlc_v2_1)
1499                 gfx_v9_0_init_rlc_ext_microcode(adev);
1500
1501         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1502                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1503                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1504                 info->fw = adev->gfx.rlc_fw;
1505                 header = (const struct common_firmware_header *)info->fw->data;
1506                 adev->firmware.fw_size +=
1507                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1508
1509                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1510                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1511                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1512                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1513                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1514                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1515                         info->fw = adev->gfx.rlc_fw;
1516                         adev->firmware.fw_size +=
1517                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1518
1519                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1520                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1521                         info->fw = adev->gfx.rlc_fw;
1522                         adev->firmware.fw_size +=
1523                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1524
1525                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1526                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1527                         info->fw = adev->gfx.rlc_fw;
1528                         adev->firmware.fw_size +=
1529                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1530                 }
1531         }
1532
1533 out:
1534         if (err) {
1535                 dev_err(adev->dev,
1536                         "gfx9: Failed to load firmware \"%s\"\n",
1537                         fw_name);
1538                 release_firmware(adev->gfx.rlc_fw);
1539                 adev->gfx.rlc_fw = NULL;
1540         }
1541         return err;
1542 }
1543
1544 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1545 {
1546         if (adev->asic_type == CHIP_ALDEBARAN ||
1547             adev->asic_type == CHIP_ARCTURUS ||
1548             adev->asic_type == CHIP_RENOIR)
1549                 return false;
1550
1551         return true;
1552 }
1553
1554 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1555                                           const char *chip_name)
1556 {
1557         char fw_name[30];
1558         int err;
1559         struct amdgpu_firmware_info *info = NULL;
1560         const struct common_firmware_header *header = NULL;
1561         const struct gfx_firmware_header_v1_0 *cp_hdr;
1562
1563         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1564         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1565         if (err)
1566                 goto out;
1567         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1568         if (err)
1569                 goto out;
1570         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1571         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1572         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1573
1575         if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1576                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1577                 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1578                 if (!err) {
1579                         err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1580                         if (err)
1581                                 goto out;
1582                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1583                                         adev->gfx.mec2_fw->data;
1584                         adev->gfx.mec2_fw_version =
1585                                         le32_to_cpu(cp_hdr->header.ucode_version);
1586                         adev->gfx.mec2_feature_version =
1587                                         le32_to_cpu(cp_hdr->ucode_feature_version);
1588                 } else {
1589                         err = 0;
1590                         adev->gfx.mec2_fw = NULL;
1591                 }
1592         } else {
1593                 adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1594                 adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1595         }
1596
1597         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1598                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1599                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1600                 info->fw = adev->gfx.mec_fw;
1601                 header = (const struct common_firmware_header *)info->fw->data;
1602                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1603                 adev->firmware.fw_size +=
1604                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1605
1606                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1607                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1608                 info->fw = adev->gfx.mec_fw;
1609                 adev->firmware.fw_size +=
1610                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1611
1612                 if (adev->gfx.mec2_fw) {
1613                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1614                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1615                         info->fw = adev->gfx.mec2_fw;
1616                         header = (const struct common_firmware_header *)info->fw->data;
1617                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1618                         adev->firmware.fw_size +=
1619                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1620
1621                         /* TODO: determine whether MEC2 JT FW loading can be
1622                          * removed for all GFX v9 ASICs and above */
1623                         if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1624                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1625                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1626                                 info->fw = adev->gfx.mec2_fw;
1627                                 adev->firmware.fw_size +=
1628                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1629                                         PAGE_SIZE);
1630                         }
1631                 }
1632         }
1633
1634 out:
1635         gfx_v9_0_check_if_need_gfxoff(adev);
1636         gfx_v9_0_check_fw_write_wait(adev);
1637         if (err) {
1638                 dev_err(adev->dev,
1639                         "gfx9: Failed to load firmware \"%s\"\n",
1640                         fw_name);
1641                 release_firmware(adev->gfx.mec_fw);
1642                 adev->gfx.mec_fw = NULL;
1643                 release_firmware(adev->gfx.mec2_fw);
1644                 adev->gfx.mec2_fw = NULL;
1645         }
1646         return err;
1647 }
1648
1649 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1650 {
1651         const char *chip_name;
1652         int r;
1653
1654         DRM_DEBUG("\n");
1655
1656         switch (adev->asic_type) {
1657         case CHIP_VEGA10:
1658                 chip_name = "vega10";
1659                 break;
1660         case CHIP_VEGA12:
1661                 chip_name = "vega12";
1662                 break;
1663         case CHIP_VEGA20:
1664                 chip_name = "vega20";
1665                 break;
1666         case CHIP_RAVEN:
1667                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1668                         chip_name = "raven2";
1669                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1670                         chip_name = "picasso";
1671                 else
1672                         chip_name = "raven";
1673                 break;
1674         case CHIP_ARCTURUS:
1675                 chip_name = "arcturus";
1676                 break;
1677         case CHIP_RENOIR:
1678                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1679                         chip_name = "renoir";
1680                 else
1681                         chip_name = "green_sardine";
1682                 break;
1683         case CHIP_ALDEBARAN:
1684                 chip_name = "aldebaran";
1685                 break;
1686         default:
1687                 BUG();
1688         }
1689
1690         /* No CPG in Arcturus */
1691         if (adev->gfx.num_gfx_rings) {
1692                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1693                 if (r)
1694                         return r;
1695         }
1696
1697         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1698         if (r)
1699                 return r;
1700
1701         return gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1706 }
1707
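     /*
      * Clear-state buffer size in DWORDs: 2 (begin preamble) + 3
      * (context control) + 2 header DWORDs plus reg_count for each
      * SECT_CONTEXT extent + 2 (end preamble) + 2 (clear state).
      */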
1708 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1709 {
1710         u32 count = 0;
1711         const struct cs_section_def *sect = NULL;
1712         const struct cs_extent_def *ext = NULL;
1713
1714         /* begin clear state */
1715         count += 2;
1716         /* context control state */
1717         count += 3;
1718
1719         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1720                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1721                         if (sect->id == SECT_CONTEXT)
1722                                 count += 2 + ext->reg_count;
1723                         else
1724                                 return 0;
1725                 }
1726         }
1727
1728         /* end clear state */
1729         count += 2;
1730         /* clear state */
1731         count += 2;
1732
1733         return count;
1734 }
1735
1736 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1737                                     volatile u32 *buffer)
1738 {
1739         u32 count = 0, i;
1740         const struct cs_section_def *sect = NULL;
1741         const struct cs_extent_def *ext = NULL;
1742
1743         if (adev->gfx.rlc.cs_data == NULL)
1744                 return;
1745         if (buffer == NULL)
1746                 return;
1747
1748         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1749         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1750
1751         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1752         buffer[count++] = cpu_to_le32(0x80000000);
1753         buffer[count++] = cpu_to_le32(0x80000000);
1754
1755         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1756                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1757                         if (sect->id == SECT_CONTEXT) {
1758                                 buffer[count++] =
1759                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1760                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1761                                                 PACKET3_SET_CONTEXT_REG_START);
1762                                 for (i = 0; i < ext->reg_count; i++)
1763                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1764                         } else {
1765                                 return;
1766                         }
1767                 }
1768         }
1769
1770         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1771         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1772
1773         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1774         buffer[count++] = cpu_to_le32(0);
1775 }
1776
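     /*
      * Build the per-SE/SH always-on CU bitmaps used by RLC load
      * balancing: the first always_on_cu_num active CUs of each SH
      * stay on, and the bitmap of the first pg_always_on_cu_num of
      * them is also written to RLC_PG_ALWAYS_ON_CU_MASK.
      */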
1777 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1778 {
1779         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1780         uint32_t pg_always_on_cu_num = 2;
1781         uint32_t always_on_cu_num;
1782         uint32_t i, j, k;
1783         uint32_t mask, cu_bitmap, counter;
1784
1785         if (adev->flags & AMD_IS_APU)
1786                 always_on_cu_num = 4;
1787         else if (adev->asic_type == CHIP_VEGA12)
1788                 always_on_cu_num = 8;
1789         else
1790                 always_on_cu_num = 12;
1791
1792         mutex_lock(&adev->grbm_idx_mutex);
1793         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1794                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1795                         mask = 1;
1796                         cu_bitmap = 0;
1797                         counter = 0;
1798                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1799
1800                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1801                                 if (cu_info->bitmap[i][j] & mask) {
1802                                         if (counter == pg_always_on_cu_num)
1803                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1804                                         if (counter < always_on_cu_num)
1805                                                 cu_bitmap |= mask;
1806                                         else
1807                                                 break;
1808                                         counter++;
1809                                 }
1810                                 mask <<= 1;
1811                         }
1812
1813                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1814                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1815                 }
1816         }
1817         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1818         mutex_unlock(&adev->grbm_idx_mutex);
1819 }
1820
1821 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1822 {
1823         uint32_t data;
1824
1825         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1826         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1827         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1828         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1829         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1830
1831         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1832         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1833
1834         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1835         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1836
1837         mutex_lock(&adev->grbm_idx_mutex);
1838         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1839         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1840         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1841
1842         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1843         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1844         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1845         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1846         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1847
1848         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1849         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1850         data &= 0x0000FFFF;
1851         data |= 0x00C00000;
1852         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1853
1854         /*
1855          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1856          * programmed in gfx_v9_0_init_always_on_cu_mask()
1857          */
1858
1859         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
1860          * used here as part of the RLC_LB_CNTL configuration */
1861         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1862         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1863         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1864         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1865         mutex_unlock(&adev->grbm_idx_mutex);
1866
1867         gfx_v9_0_init_always_on_cu_mask(adev);
1868 }
1869
1870 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1871 {
1872         uint32_t data;
1873
1874         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1875         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1876         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1877         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1878         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1879
1880         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1881         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1882
1883         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1884         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1885
1886         mutex_lock(&adev->grbm_idx_mutex);
1887         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1888         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1889         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1890
1891         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1892         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1893         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1894         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1895         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1896
1897         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1898         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1899         data &= 0x0000FFFF;
1900         data |= 0x00C00000;
1901         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1902
1903         /*
1904          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1905          * programmed in gfx_v9_0_init_always_on_cu_mask()
1906          */
1907
1908         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
1909          * used here as part of the RLC_LB_CNTL configuration */
1910         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1911         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1912         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1913         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1914         mutex_unlock(&adev->grbm_idx_mutex);
1915
1916         gfx_v9_0_init_always_on_cu_mask(adev);
1917 }
1918
1919 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1920 {
1921         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1922 }
1923
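     /*
      * Number of CP jump tables in the RLC cp_table: presumably one
      * each for PFP, ME, CE and MEC1, plus MEC2 when a separate mec2
      * firmware image is supported.
      */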
1924 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1925 {
1926         if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1927                 return 5;
1928         else
1929                 return 4;
1930 }
1931
1932 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1933 {
1934         const struct cs_section_def *cs_data;
1935         int r;
1936
1937         adev->gfx.rlc.cs_data = gfx9_cs_data;
1938
1939         cs_data = adev->gfx.rlc.cs_data;
1940
1941         if (cs_data) {
1942                 /* init clear state block */
1943                 r = amdgpu_gfx_rlc_init_csb(adev);
1944                 if (r)
1945                         return r;
1946         }
1947
1948         if (adev->flags & AMD_IS_APU) {
1949                 /* TODO: double check the cp_table_size for RV */
1950                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1951                 r = amdgpu_gfx_rlc_init_cpt(adev);
1952                 if (r)
1953                         return r;
1954         }
1955
1956         switch (adev->asic_type) {
1957         case CHIP_RAVEN:
1958                 gfx_v9_0_init_lbpw(adev);
1959                 break;
1960         case CHIP_VEGA20:
1961                 gfx_v9_4_init_lbpw(adev);
1962                 break;
1963         default:
1964                 break;
1965         }
1966
1967         /* init spm vmid with 0xf */
1968         if (adev->gfx.rlc.funcs->update_spm_vmid)
1969                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1970
1971         return 0;
1972 }
1973
1974 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1975 {
1976         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1977         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1978 }
1979
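     /*
      * Allocate MEC resources: a VRAM HPD EOP buffer covering every
      * acquired compute ring and a GTT BO holding a copy of the MEC
      * firmware image.
      */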
1980 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1981 {
1982         int r;
1983         u32 *hpd;
1984         const __le32 *fw_data;
1985         unsigned fw_size;
1986         u32 *fw;
1987         size_t mec_hpd_size;
1988
1989         const struct gfx_firmware_header_v1_0 *mec_hdr;
1990
1991         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1992
1993         /* take ownership of the relevant compute queues */
1994         amdgpu_gfx_compute_queue_acquire(adev);
1995         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1996         if (mec_hpd_size) {
1997                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1998                                               AMDGPU_GEM_DOMAIN_VRAM,
1999                                               &adev->gfx.mec.hpd_eop_obj,
2000                                               &adev->gfx.mec.hpd_eop_gpu_addr,
2001                                               (void **)&hpd);
2002                 if (r) {
2003                         dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
2004                         gfx_v9_0_mec_fini(adev);
2005                         return r;
2006                 }
2007
2008                 memset(hpd, 0, mec_hpd_size);
2009
2010                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2011                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2012         }
2013
2014         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2015
2016         fw_data = (const __le32 *)
2017                 (adev->gfx.mec_fw->data +
2018                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2019         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
2020
2021         r = amdgpu_bo_create_reserved(adev, fw_size,
2022                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2023                                       &adev->gfx.mec.mec_fw_obj,
2024                                       &adev->gfx.mec.mec_fw_gpu_addr,
2025                                       (void **)&fw);
2026         if (r) {
2027                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
2028                 gfx_v9_0_mec_fini(adev);
2029                 return r;
2030         }
2031
2032         memcpy(fw, fw_data, fw_size);
2033
2034         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
2035         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
2036
2037         return 0;
2038 }
2039
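     /*
      * Read one wave-state register for the given SIMD/wave slot via
      * the SQ_IND_INDEX/SQ_IND_DATA indirect register pair.
      */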
2040 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
2041 {
2042         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2043                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2044                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2045                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
2046                 (SQ_IND_INDEX__FORCE_READ_MASK));
2047         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2048 }
2049
2050 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2051                            uint32_t wave, uint32_t thread,
2052                            uint32_t regno, uint32_t num, uint32_t *out)
2053 {
2054         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2055                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2056                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2057                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2058                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2059                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2060                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2061         while (num--)
2062                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2063 }
2064
2065 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2066 {
2067         /* type 1 wave data */
2068         dst[(*no_fields)++] = 1;
2069         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2070         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2071         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2072         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2073         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2074         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2075         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2076         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2077         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2078         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2079         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2080         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2081         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2082         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2083 }
2084
2085 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2086                                      uint32_t wave, uint32_t start,
2087                                      uint32_t size, uint32_t *dst)
2088 {
2089         wave_read_regs(
2090                 adev, simd, wave, 0,
2091                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2092 }
2093
2094 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2095                                      uint32_t wave, uint32_t thread,
2096                                      uint32_t start, uint32_t size,
2097                                      uint32_t *dst)
2098 {
2099         wave_read_regs(
2100                 adev, simd, wave, thread,
2101                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2102 }
2103
2104 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2105                                   u32 me, u32 pipe, u32 q, u32 vm)
2106 {
2107         soc15_grbm_select(adev, me, pipe, q, vm);
2108 }
2109
2110 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2111         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2112         .select_se_sh = &gfx_v9_0_select_se_sh,
2113         .read_wave_data = &gfx_v9_0_read_wave_data,
2114         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2115         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2116         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2117 };
2118
2119 static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
2120         .ras_late_init = amdgpu_gfx_ras_late_init,
2121         .ras_fini = amdgpu_gfx_ras_fini,
2122         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2123         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2124         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2125 };
2126
2127 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2128 {
2129         u32 gb_addr_config;
2130         int err;
2131
2132         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2133
2134         switch (adev->asic_type) {
2135         case CHIP_VEGA10:
2136                 adev->gfx.config.max_hw_contexts = 8;
2137                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2138                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2139                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2140                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2141                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2142                 break;
2143         case CHIP_VEGA12:
2144                 adev->gfx.config.max_hw_contexts = 8;
2145                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2146                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2147                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2148                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2149                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2150                 DRM_INFO("fix gfx.config for vega12\n");
2151                 break;
2152         case CHIP_VEGA20:
2153                 adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
2154                 adev->gfx.config.max_hw_contexts = 8;
2155                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2156                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2157                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2158                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2159                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2160                 gb_addr_config &= ~0xf3e777ff;
2161                 gb_addr_config |= 0x22014042;
2162                 /* check vbios table if gpu info is not available */
2163                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2164                 if (err)
2165                         return err;
2166                 break;
2167         case CHIP_RAVEN:
2168                 adev->gfx.config.max_hw_contexts = 8;
2169                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2170                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2171                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2172                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2173                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2174                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2175                 else
2176                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2177                 break;
2178         case CHIP_ARCTURUS:
2179                 adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
2180                 adev->gfx.config.max_hw_contexts = 8;
2181                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2182                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2183                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2184                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2185                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2186                 gb_addr_config &= ~0xf3e777ff;
2187                 gb_addr_config |= 0x22014042;
2188                 break;
2189         case CHIP_RENOIR:
2190                 adev->gfx.config.max_hw_contexts = 8;
2191                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2192                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2193                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2194                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2195                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2196                 gb_addr_config &= ~0xf3e777ff;
2197                 gb_addr_config |= 0x22010042;
2198                 break;
2199         case CHIP_ALDEBARAN:
2200                 adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
2201                 adev->gfx.config.max_hw_contexts = 8;
2202                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2203                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2204                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2205                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2206                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2207                 gb_addr_config &= ~0xf3e777ff;
2208                 gb_addr_config |= 0x22014042;
2209                 /* check vbios table if gpu info is not available */
2210                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2211                 if (err)
2212                         return err;
2213                 break;
2214         default:
2215                 BUG();
2216                 break;
2217         }
2218
2219         adev->gfx.config.gb_addr_config = gb_addr_config;
2220
2221         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2222                         REG_GET_FIELD(
2223                                         adev->gfx.config.gb_addr_config,
2224                                         GB_ADDR_CONFIG,
2225                                         NUM_PIPES);
2226
2227         adev->gfx.config.max_tile_pipes =
2228                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2229
2230         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2231                         REG_GET_FIELD(
2232                                         adev->gfx.config.gb_addr_config,
2233                                         GB_ADDR_CONFIG,
2234                                         NUM_BANKS);
2235         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2236                         REG_GET_FIELD(
2237                                         adev->gfx.config.gb_addr_config,
2238                                         GB_ADDR_CONFIG,
2239                                         MAX_COMPRESSED_FRAGS);
2240         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2241                         REG_GET_FIELD(
2242                                         adev->gfx.config.gb_addr_config,
2243                                         GB_ADDR_CONFIG,
2244                                         NUM_RB_PER_SE);
2245         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2246                         REG_GET_FIELD(
2247                                         adev->gfx.config.gb_addr_config,
2248                                         GB_ADDR_CONFIG,
2249                                         NUM_SHADER_ENGINES);
2250         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2251                         REG_GET_FIELD(
2252                                         adev->gfx.config.gb_addr_config,
2253                                         GB_ADDR_CONFIG,
2254                                         PIPE_INTERLEAVE_SIZE));
2255
2256         return 0;
2257 }
2258
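/*
 * Set up one compute ring.  MEC0 is exposed to the CP as ME1, hence
 * ring->me = mec + 1.  Each ring gets its own doorbell relative to
 * the mec_ring0 doorbell and a GFX9_MEC_HPD_SIZE slice of the shared
 * MEC EOP buffer.
 */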
2259 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2260                                       int mec, int pipe, int queue)
2261 {
2262         unsigned irq_type;
2263         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2264         unsigned int hw_prio;
2267
2268         /* mec0 is me1 */
2269         ring->me = mec + 1;
2270         ring->pipe = pipe;
2271         ring->queue = queue;
2272
2273         ring->ring_obj = NULL;
2274         ring->use_doorbell = true;
2275         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2276         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2277                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2278         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2279
2280         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2281                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2282                 + ring->pipe;
2283         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2284                         AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2285         /* type-2 packets are deprecated on MEC, use type-3 instead */
2286         return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2287                                 hw_prio, NULL);
2288 }
2289
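/*
 * Allocate all software state needed before hw init: IRQ sources,
 * microcode, the RLC and MEC buffer objects, the gfx and compute
 * rings, the KIQ ring and the MQD backing store.
 */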
2290 static int gfx_v9_0_sw_init(void *handle)
2291 {
2292         int i, j, k, r, ring_id;
2293         struct amdgpu_ring *ring;
2294         struct amdgpu_kiq *kiq;
2295         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296
2297         switch (adev->asic_type) {
2298         case CHIP_VEGA10:
2299         case CHIP_VEGA12:
2300         case CHIP_VEGA20:
2301         case CHIP_RAVEN:
2302         case CHIP_ARCTURUS:
2303         case CHIP_RENOIR:
2304         case CHIP_ALDEBARAN:
2305                 adev->gfx.mec.num_mec = 2;
2306                 break;
2307         default:
2308                 adev->gfx.mec.num_mec = 1;
2309                 break;
2310         }
2311
2312         adev->gfx.mec.num_pipe_per_mec = 4;
2313         adev->gfx.mec.num_queue_per_pipe = 8;
2314
2315         /* EOP Event */
2316         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2317         if (r)
2318                 return r;
2319
2320         /* Privileged reg */
2321         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2322                               &adev->gfx.priv_reg_irq);
2323         if (r)
2324                 return r;
2325
2326         /* Privileged inst */
2327         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2328                               &adev->gfx.priv_inst_irq);
2329         if (r)
2330                 return r;
2331
2332         /* ECC error */
2333         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2334                               &adev->gfx.cp_ecc_error_irq);
2335         if (r)
2336                 return r;
2337
2338         /* FUE error */
2339         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2340                               &adev->gfx.cp_ecc_error_irq);
2341         if (r)
2342                 return r;
2343
2344         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2345
2346         gfx_v9_0_scratch_init(adev);
2347
2348         r = gfx_v9_0_init_microcode(adev);
2349         if (r) {
2350                 DRM_ERROR("Failed to load gfx firmware!\n");
2351                 return r;
2352         }
2353
2354         r = adev->gfx.rlc.funcs->init(adev);
2355         if (r) {
2356                 DRM_ERROR("Failed to init rlc BOs!\n");
2357                 return r;
2358         }
2359
2360         r = gfx_v9_0_mec_init(adev);
2361         if (r) {
2362                 DRM_ERROR("Failed to init MEC BOs!\n");
2363                 return r;
2364         }
2365
2366         /* set up the gfx ring */
2367         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2368                 ring = &adev->gfx.gfx_ring[i];
2369                 ring->ring_obj = NULL;
2370                 if (!i)
2371                         sprintf(ring->name, "gfx");
2372                 else
2373                         sprintf(ring->name, "gfx_%d", i);
2374                 ring->use_doorbell = true;
2375                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2376                 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2377                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2378                                      AMDGPU_RING_PRIO_DEFAULT, NULL);
2379                 if (r)
2380                         return r;
2381         }
2382
2383         /* set up the compute queues - allocate horizontally across pipes */
2384         ring_id = 0;
2385         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2386                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2387                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2388                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2389                                         continue;
2390
2391                                 r = gfx_v9_0_compute_ring_init(adev,
2392                                                                ring_id,
2393                                                                i, k, j);
2394                                 if (r)
2395                                         return r;
2396
2397                                 ring_id++;
2398                         }
2399                 }
2400         }
2401
2402         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2403         if (r) {
2404                 DRM_ERROR("Failed to init KIQ BOs!\n");
2405                 return r;
2406         }
2407
2408         kiq = &adev->gfx.kiq;
2409         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2410         if (r)
2411                 return r;
2412
2413         /* create MQD for all compute queues as well as KIQ for the SRIOV case */
2414         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2415         if (r)
2416                 return r;
2417
2418         adev->gfx.ce_ram_size = 0x8000;
2419
2420         r = gfx_v9_0_gpu_early_init(adev);
2421         if (r)
2422                 return r;
2423
2424         return 0;
2425 }
2426
2428 static int gfx_v9_0_sw_fini(void *handle)
2429 {
2430         int i;
2431         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2432
2433         if (adev->gfx.ras_funcs &&
2434             adev->gfx.ras_funcs->ras_fini)
2435                 adev->gfx.ras_funcs->ras_fini(adev);
2436
2437         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2438                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2439         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2440                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2441
2442         amdgpu_gfx_mqd_sw_fini(adev);
2443         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2444         amdgpu_gfx_kiq_fini(adev);
2445
2446         gfx_v9_0_mec_fini(adev);
2447         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2448         if (adev->flags & AMD_IS_APU) {
2449                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2450                                 &adev->gfx.rlc.cp_table_gpu_addr,
2451                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2452         }
2453         gfx_v9_0_free_microcode(adev);
2454
2455         return 0;
2456 }
2457
2459 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2460 {
2461         /* TODO */
2462 }
2463
2464 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2465                            u32 instance)
2466 {
2467         u32 data;
2468
2469         if (instance == 0xffffffff)
2470                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2471         else
2472                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2473
2474         if (se_num == 0xffffffff)
2475                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2476         else
2477                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2478
2479         if (sh_num == 0xffffffff)
2480                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2481         else
2482                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2483
2484         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2485 }
2486
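/*
 * Return the bitmap of active render backends for the SE/SH that is
 * currently selected through GRBM_GFX_INDEX, by OR-ing the fused-off
 * (CC) and user-disabled (GC_USER) masks and inverting the result.
 */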
2487 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2488 {
2489         u32 data, mask;
2490
2491         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2492         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2493
2494         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2495         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2496
2497         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2498                                          adev->gfx.config.max_sh_per_se);
2499
2500         return (~data) & mask;
2501 }
2502
2503 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2504 {
2505         int i, j;
2506         u32 data;
2507         u32 active_rbs = 0;
2508         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2509                                         adev->gfx.config.max_sh_per_se;
2510
2511         mutex_lock(&adev->grbm_idx_mutex);
2512         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2513                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2514                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2515                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2516                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2517                                                rb_bitmap_width_per_sh);
2518                 }
2519         }
2520         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2521         mutex_unlock(&adev->grbm_idx_mutex);
2522
2523         adev->gfx.config.backend_enable_mask = active_rbs;
2524         adev->gfx.config.num_rbs = hweight32(active_rbs);
2525 }
2526
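/*
 * SH_MEM_BASES packs two 16-bit aperture bases (shared in the low
 * half, private in the high half); writing DEFAULT_SH_MEM_BASES to
 * both fields places the compute apertures at the addresses listed
 * in the comment below.
 */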
2527 #define DEFAULT_SH_MEM_BASES    (0x6000)
2528 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2529 {
2530         int i;
2531         uint32_t sh_mem_config;
2532         uint32_t sh_mem_bases;
2533
2534         /*
2535          * Configure apertures:
2536          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2537          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2538          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2539          */
2540         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2541
2542         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2543                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2544                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2545
2546         mutex_lock(&adev->srbm_mutex);
2547         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2548                 soc15_grbm_select(adev, 0, 0, 0, i);
2549                 /* CP and shaders */
2550                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2551                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2552         }
2553         soc15_grbm_select(adev, 0, 0, 0, 0);
2554         mutex_unlock(&adev->srbm_mutex);
2555
2556         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2557            access. These should be enabled by FW for target VMIDs. */
2558         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2559                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2560                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2561                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2562                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2563         }
2564 }
2565
2566 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2567 {
2568         int vmid;
2569
2570         /*
2571          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2572          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2573          * the driver can enable them for graphics. VMID0 should maintain
2574          * access so that HWS firmware can save/restore entries.
2575          */
2576         for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2577                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2578                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2579                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2580                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2581         }
2582 }
2583
2584 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2585 {
2586         uint32_t tmp;
2587
2588         switch (adev->asic_type) {
2589         case CHIP_ARCTURUS:
2590                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2591                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2592                                         DISABLE_BARRIER_WAITCNT, 1);
2593                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2594                 break;
2595         default:
2596                 break;
2597         }
2598 }
2599
2600 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2601 {
2602         u32 tmp;
2603         int i;
2604
2605         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2606
2607         gfx_v9_0_tiling_mode_table_init(adev);
2608
2609         gfx_v9_0_setup_rb(adev);
2610         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2611         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2612
2613         /* XXX SH_MEM regs */
2614         /* where to put LDS, scratch, GPUVM in FSA64 space */
2615         mutex_lock(&adev->srbm_mutex);
2616         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2617                 soc15_grbm_select(adev, 0, 0, 0, i);
2618                 /* CP and shaders */
2619                 if (i == 0) {
2620                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2621                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2622                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2623                                             !!adev->gmc.noretry);
2624                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2625                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2626                 } else {
2627                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2628                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2629                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2630                                             !!adev->gmc.noretry);
2631                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2632                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2633                                 (adev->gmc.private_aperture_start >> 48));
2634                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2635                                 (adev->gmc.shared_aperture_start >> 48));
2636                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2637                 }
2638         }
2639         soc15_grbm_select(adev, 0, 0, 0, 0);
2640
2641         mutex_unlock(&adev->srbm_mutex);
2642
2643         gfx_v9_0_init_compute_vmid(adev);
2644         gfx_v9_0_init_gds_vmid(adev);
2645         gfx_v9_0_init_sq_config(adev);
2646 }
2647
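/*
 * Poll (up to usec_timeout per unit) until the RLC serdes CU masters
 * of every SE/SH report idle, then wait for the non-CU (SE/GC/TC)
 * masters as well.
 */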
2648 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2649 {
2650         u32 i, j, k;
2651         u32 mask;
2652
2653         mutex_lock(&adev->grbm_idx_mutex);
2654         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2655                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2656                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2657                         for (k = 0; k < adev->usec_timeout; k++) {
2658                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2659                                         break;
2660                                 udelay(1);
2661                         }
2662                         if (k == adev->usec_timeout) {
2663                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2664                                                       0xffffffff, 0xffffffff);
2665                                 mutex_unlock(&adev->grbm_idx_mutex);
2666                                 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2667                                          i, j);
2668                                 return;
2669                         }
2670                 }
2671         }
2672         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2673         mutex_unlock(&adev->grbm_idx_mutex);
2674
2675         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2676                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2677                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2678                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2679         for (k = 0; k < adev->usec_timeout; k++) {
2680                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2681                         break;
2682                 udelay(1);
2683         }
2684 }
2685
2686 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2687                                                bool enable)
2688 {
2689         u32 tmp;
2690
2691         /* These interrupts should be enabled to drive DS clock */
2692
2693         tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2694
2695         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2696         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2697         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2698         if (adev->gfx.num_gfx_rings)
2699                 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2700
2701         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2702 }
2703
2704 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2705 {
2706         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2707         /* csib */
2708         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2709                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2710         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2711                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2712         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2713                         adev->gfx.rlc.clear_state_size);
2714 }
2715
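/*
 * Walk the RLC register_list_format blob from indirect_offset on.
 * Each indirect block ends with a 0xFFFFFFFF sentinel; record where
 * every block starts and collect the unique indirect register
 * offsets the blocks reference.
 */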
2716 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2717                                 int indirect_offset,
2718                                 int list_size,
2719                                 int *unique_indirect_regs,
2720                                 int unique_indirect_reg_count,
2721                                 int *indirect_start_offsets,
2722                                 int *indirect_start_offsets_count,
2723                                 int max_start_offsets_count)
2724 {
2725         int idx;
2726
2727         for (; indirect_offset < list_size; indirect_offset++) {
2728                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2729                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2730                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2731
2732                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2733                         indirect_offset += 2;
2734
2735                         /* look for the matching index */
2736                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2737                                 if (unique_indirect_regs[idx] ==
2738                                         register_list_format[indirect_offset] ||
2739                                         !unique_indirect_regs[idx])
2740                                         break;
2741                         }
2742
2743                         BUG_ON(idx >= unique_indirect_reg_count);
2744
2745                         if (!unique_indirect_regs[idx])
2746                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2747
2748                         indirect_offset++;
2749                 }
2750         }
2751 }
2752
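/*
 * Program the RLC save/restore machine: upload the direct register
 * restore table into SRM ARAM, the register list format plus block
 * start offsets into GPM scratch, and map every unique indirect
 * register through the RLC_SRM_INDEX_CNTL_ADDR/DATA pairs.
 */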
2753 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2754 {
2755         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2756         int unique_indirect_reg_count = 0;
2757
2758         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2759         int indirect_start_offsets_count = 0;
2760
2761         int list_size = 0;
2762         int i = 0, j = 0;
2763         u32 tmp = 0;
2764
2765         u32 *register_list_format =
2766                 kmemdup(adev->gfx.rlc.register_list_format,
2767                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2768         if (!register_list_format)
2769                 return -ENOMEM;
2770
2771         /* setup unique_indirect_regs array and indirect_start_offsets array */
2772         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2773         gfx_v9_1_parse_ind_reg_list(register_list_format,
2774                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2775                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2776                                     unique_indirect_regs,
2777                                     unique_indirect_reg_count,
2778                                     indirect_start_offsets,
2779                                     &indirect_start_offsets_count,
2780                                     ARRAY_SIZE(indirect_start_offsets));
2781
2782         /* enable auto inc in case it is disabled */
2783         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2784         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2785         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2786
2787         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2788         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2789                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2790         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2791                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2792                         adev->gfx.rlc.register_restore[i]);
2793
2794         /* load indirect register */
2795         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2796                 adev->gfx.rlc.reg_list_format_start);
2797
2798         /* direct register portion */
2799         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2800                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2801                         register_list_format[i]);
2802
2803         /* indirect register portion */
2804         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2805                 if (register_list_format[i] == 0xFFFFFFFF) {
2806                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2807                         continue;
2808                 }
2809
2810                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2811                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2812
2813                 for (j = 0; j < unique_indirect_reg_count; j++) {
2814                         if (register_list_format[i] == unique_indirect_regs[j]) {
2815                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2816                                 break;
2817                         }
2818                 }
2819
2820                 BUG_ON(j >= unique_indirect_reg_count);
2821
2822                 i++;
2823         }
2824
2825         /* set save/restore list size */
2826         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2827         list_size = list_size >> 1;
2828         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2829                 adev->gfx.rlc.reg_restore_list_size);
2830         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2831
2832         /* write the starting offsets to RLC scratch ram */
2833         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2834                 adev->gfx.rlc.starting_offsets_start);
2835         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2836                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2837                        indirect_start_offsets[i]);
2838
2839         /* load unique indirect regs*/
2840         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2841                 if (unique_indirect_regs[i] != 0) {
2842                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2843                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2844                                unique_indirect_regs[i] & 0x3FFFF);
2845
2846                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2847                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2848                                unique_indirect_regs[i] >> 20);
2849                 }
2850         }
2851
2852         kfree(register_list_format);
2853         return 0;
2854 }
2855
2856 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2857 {
2858         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2859 }
2860
2861 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2862                                              bool enable)
2863 {
2864         uint32_t data = 0;
2865         uint32_t default_data = 0;
2866
2867         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2868         if (enable) {
2869                 /* enable GFXIP control over CGPG */
2870                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2871                 if (default_data != data)
2872                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2873
2874                 /* update status */
2875                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2876                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2877                 if (default_data != data)
2878                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2879         } else {
2880                 /* restore GFXIP control over CGPG */
2881                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2882                 if (default_data != data)
2883                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2884         }
2885 }
2886
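/*
 * When any gfx power-gating mode is supported, program the CP idle
 * poll count and the RLC power-gating delay registers, then hand
 * CGPG control to GFXIP (except on Renoir).
 */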
2887 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2888 {
2889         uint32_t data = 0;
2890
2891         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2892                               AMD_PG_SUPPORT_GFX_SMG |
2893                               AMD_PG_SUPPORT_GFX_DMG)) {
2894                 /* init IDLE_POLL_COUNT = 60 */
2895                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2896                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2897                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2898                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2899
2900                 /* init RLC PG Delay */
2901                 data = 0;
2902                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2903                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2904                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2905                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2906                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2907
2908                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2909                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2910                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2911                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2912
2913                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2914                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2915                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2916                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2917
2918                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2919                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2920
2921                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2922                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2923                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2924                 if (adev->asic_type != CHIP_RENOIR)
2925                         pwr_10_0_gfxip_control_over_cgpg(adev, true);
2926         }
2927 }
2928
2929 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2930                                                 bool enable)
2931 {
2932         uint32_t data = 0;
2933         uint32_t default_data = 0;
2934
2935         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2936         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2937                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2938                              enable ? 1 : 0);
2939         if (default_data != data)
2940                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2941 }
2942
2943 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2944                                                 bool enable)
2945 {
2946         uint32_t data = 0;
2947         uint32_t default_data = 0;
2948
2949         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2950         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2951                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2952                              enable ? 1 : 0);
2953         if (default_data != data)
2954                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2955 }
2956
2957 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2958                                         bool enable)
2959 {
2960         uint32_t data = 0;
2961         uint32_t default_data = 0;
2962
2963         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2964         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2965                              CP_PG_DISABLE,
2966                              enable ? 0 : 1);
2967         if (default_data != data)
2968                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2969 }
2970
2971 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2972                                                 bool enable)
2973 {
2974         uint32_t data, default_data;
2975
2976         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2977         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2978                              GFX_POWER_GATING_ENABLE,
2979                              enable ? 1 : 0);
2980         if (default_data != data)
2981                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2982 }
2983
2984 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2985                                                 bool enable)
2986 {
2987         uint32_t data, default_data;
2988
2989         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2990         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2991                              GFX_PIPELINE_PG_ENABLE,
2992                              enable ? 1 : 0);
2993         if (default_data != data)
2994                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2995
2996         if (!enable)
2997                 /* read any GFX register to wake up GFX */
2998                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2999 }
3000
3001 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3002                                                        bool enable)
3003 {
3004         uint32_t data, default_data;
3005
3006         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3007         data = REG_SET_FIELD(data, RLC_PG_CNTL,
3008                              STATIC_PER_CU_PG_ENABLE,
3009                              enable ? 1 : 0);
3010         if (default_data != data)
3011                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3012 }
3013
3014 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3015                                                 bool enable)
3016 {
3017         uint32_t data, default_data;
3018
3019         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3020         data = REG_SET_FIELD(data, RLC_PG_CNTL,
3021                              DYN_PER_CU_PG_ENABLE,
3022                              enable ? 1 : 0);
3023         if (default_data != data)
3024                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3025 }
3026
3027 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3028 {
3029         gfx_v9_0_init_csb(adev);
3030
3031         /*
3032          * Rlc save restore list is workable since v2_1.
3033          * And it's needed by gfxoff feature.
3034          */
3035         if (adev->gfx.rlc.is_rlc_v2_1) {
3036                 if (adev->asic_type == CHIP_VEGA12 ||
3037                     (adev->apu_flags & AMD_APU_IS_RAVEN2))
3038                         gfx_v9_1_init_rlc_save_restore_list(adev);
3039                 gfx_v9_0_enable_save_restore_machine(adev);
3040         }
3041
3042         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3043                               AMD_PG_SUPPORT_GFX_SMG |
3044                               AMD_PG_SUPPORT_GFX_DMG |
3045                               AMD_PG_SUPPORT_CP |
3046                               AMD_PG_SUPPORT_GDS |
3047                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
3048                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
3049                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
3050                 gfx_v9_0_init_gfx_power_gating(adev);
3051         }
3052 }
3053
3054 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3055 {
3056         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3057         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3058         gfx_v9_0_wait_for_rlc_serdes(adev);
3059 }
3060
3061 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3062 {
3063         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3064         udelay(50);
3065         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3066         udelay(50);
3067 }
3068
3069 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3070 {
3071 #ifdef AMDGPU_RLC_DEBUG_RETRY
3072         u32 rlc_ucode_ver;
3073 #endif
3074
3075         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3076         udelay(50);
3077
3078         /* APUs (e.g. carrizo) enable the CP interrupt only after the CP is initialized */
3079         if (!(adev->flags & AMD_IS_APU)) {
3080                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3081                 udelay(50);
3082         }
3083
3084 #ifdef AMDGPU_RLC_DEBUG_RETRY
3085         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3086         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3087         if (rlc_ucode_ver == 0x108) {
3088                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3089                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3090                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3091                  * default is 0x9C4 to create a 100us interval */
3092                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3093                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3094                  * to disable the page fault retry interrupts, default is
3095                  * 0x100 (256) */
3096                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3097         }
3098 #endif
3099 }
3100
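/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode dwords into
 * RLC_GPM_UCODE_DATA starting at the fixed load address, then write
 * the firmware version into RLC_GPM_UCODE_ADDR.
 */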
3101 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3102 {
3103         const struct rlc_firmware_header_v2_0 *hdr;
3104         const __le32 *fw_data;
3105         unsigned i, fw_size;
3106
3107         if (!adev->gfx.rlc_fw)
3108                 return -EINVAL;
3109
3110         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3111         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3112
3113         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3114                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3115         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3116
3117         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3118                         RLCG_UCODE_LOADING_START_ADDRESS);
3119         for (i = 0; i < fw_size; i++)
3120                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3121         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3122
3123         return 0;
3124 }
3125
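/*
 * Full RLC bringup: stop the RLC, disable coarse-grained clock
 * gating, initialize power gating, load the microcode when PSP does
 * not (legacy path), apply the per-chip LBPW policy and start the
 * RLC again.
 */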
3126 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3127 {
3128         int r;
3129
3130         if (amdgpu_sriov_vf(adev)) {
3131                 gfx_v9_0_init_csb(adev);
3132                 return 0;
3133         }
3134
3135         adev->gfx.rlc.funcs->stop(adev);
3136
3137         /* disable CG */
3138         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3139
3140         gfx_v9_0_init_pg(adev);
3141
3142         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3143                 /* legacy rlc firmware loading */
3144                 r = gfx_v9_0_rlc_load_microcode(adev);
3145                 if (r)
3146                         return r;
3147         }
3148
3149         switch (adev->asic_type) {
3150         case CHIP_RAVEN:
3151                 if (amdgpu_lbpw == 0)
3152                         gfx_v9_0_enable_lbpw(adev, false);
3153                 else
3154                         gfx_v9_0_enable_lbpw(adev, true);
3155                 break;
3156         case CHIP_VEGA20:
3157                 if (amdgpu_lbpw > 0)
3158                         gfx_v9_0_enable_lbpw(adev, true);
3159                 else
3160                         gfx_v9_0_enable_lbpw(adev, false);
3161                 break;
3162         default:
3163                 break;
3164         }
3165
3166         adev->gfx.rlc.funcs->start(adev);
3167
3168         return 0;
3169 }
3170
3171 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3172 {
3173         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3174
3175         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3176         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3177         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3178         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3179         udelay(50);
3180 }
3181
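/*
 * Legacy (non-PSP) load of the gfx CP microcode: halt the CP, then
 * stream the PFP, CE and ME images through their UCODE/RAM data
 * registers and record the firmware versions.
 */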
3182 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3183 {
3184         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3185         const struct gfx_firmware_header_v1_0 *ce_hdr;
3186         const struct gfx_firmware_header_v1_0 *me_hdr;
3187         const __le32 *fw_data;
3188         unsigned i, fw_size;
3189
3190         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3191                 return -EINVAL;
3192
3193         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3194                 adev->gfx.pfp_fw->data;
3195         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3196                 adev->gfx.ce_fw->data;
3197         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3198                 adev->gfx.me_fw->data;
3199
3200         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3201         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3202         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3203
3204         gfx_v9_0_cp_gfx_enable(adev, false);
3205
3206         /* PFP */
3207         fw_data = (const __le32 *)
3208                 (adev->gfx.pfp_fw->data +
3209                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3210         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3211         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3212         for (i = 0; i < fw_size; i++)
3213                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3214         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3215
3216         /* CE */
3217         fw_data = (const __le32 *)
3218                 (adev->gfx.ce_fw->data +
3219                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3220         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3221         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3222         for (i = 0; i < fw_size; i++)
3223                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3224         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3225
3226         /* ME */
3227         fw_data = (const __le32 *)
3228                 (adev->gfx.me_fw->data +
3229                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3230         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3231         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3232         for (i = 0; i < fw_size; i++)
3233                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3234         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3235
3236         return 0;
3237 }
3238
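/*
 * Bring the gfx CP up and emit its one-time init sequence on ring 0:
 * clear-state preamble, the SECT_CONTEXT golden values from
 * gfx9_cs_data, a CLEAR_STATE packet and the CE partition bases.
 */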
3239 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3240 {
3241         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3242         const struct cs_section_def *sect = NULL;
3243         const struct cs_extent_def *ext = NULL;
3244         int r, i, tmp;
3245
3246         /* init the CP */
3247         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3248         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3249
3250         gfx_v9_0_cp_gfx_enable(adev, true);
3251
3252         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3253         if (r) {
3254                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3255                 return r;
3256         }
3257
3258         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3259         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3260
3261         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3262         amdgpu_ring_write(ring, 0x80000000);
3263         amdgpu_ring_write(ring, 0x80000000);
3264
3265         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3266                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3267                         if (sect->id == SECT_CONTEXT) {
3268                                 amdgpu_ring_write(ring,
3269                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3270                                                ext->reg_count));
3271                                 amdgpu_ring_write(ring,
3272                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3273                                 for (i = 0; i < ext->reg_count; i++)
3274                                         amdgpu_ring_write(ring, ext->extent[i]);
3275                         }
3276                 }
3277         }
3278
3279         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3280         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3281
3282         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3283         amdgpu_ring_write(ring, 0);
3284
3285         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3286         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3287         amdgpu_ring_write(ring, 0x8000);
3288         amdgpu_ring_write(ring, 0x8000);
3289
3290         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3291         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3292                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3293         amdgpu_ring_write(ring, tmp);
3294         amdgpu_ring_write(ring, 0);
3295
3296         amdgpu_ring_commit(ring);
3297
3298         return 0;
3299 }
3300
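/*
 * Program the CP_RB0_* ring buffer registers: size, rptr/wptr
 * writeback addresses, base address and doorbell range, then run
 * gfx_v9_0_cp_gfx_start() and mark the ring schedulable.
 */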
3301 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3302 {
3303         struct amdgpu_ring *ring;
3304         u32 tmp;
3305         u32 rb_bufsz;
3306         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3307
3308         /* Set the write pointer delay */
3309         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3310
3311         /* set the RB to use vmid 0 */
3312         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3313
3314         /* Set ring buffer size */
3315         ring = &adev->gfx.gfx_ring[0];
3316         rb_bufsz = order_base_2(ring->ring_size / 8);
3317         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3318         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3319 #ifdef __BIG_ENDIAN
3320         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3321 #endif
3322         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3323
3324         /* Initialize the ring buffer's write pointers */
3325         ring->wptr = 0;
3326         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3327         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3328
3329         /* set the wb address whether it's enabled or not */
3330         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3331         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3332         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3333
3334         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3335         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3336         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3337
3338         mdelay(1);
3339         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3340
3341         rb_addr = ring->gpu_addr >> 8;
3342         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3343         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3344
3345         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3346         if (ring->use_doorbell) {
3347                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3348                                     DOORBELL_OFFSET, ring->doorbell_index);
3349                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3350                                     DOORBELL_EN, 1);
3351         } else {
3352                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3353         }
3354         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3355
3356         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3357                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3358         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3359
3360         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3361                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3362
3364         /* start the ring */
3365         gfx_v9_0_cp_gfx_start(adev);
3366         ring->sched.ready = true;
3367
3368         return 0;
3369 }
3370
3371 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3372 {
3373         if (enable) {
3374                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3375         } else {
3376                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3377                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3378                 adev->gfx.kiq.ring.sched.ready = false;
3379         }
3380         udelay(50);
3381 }
3382
3383 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3384 {
3385         const struct gfx_firmware_header_v1_0 *mec_hdr;
3386         const __le32 *fw_data;
3387         unsigned i;
3388         u32 tmp;
3389
3390         if (!adev->gfx.mec_fw)
3391                 return -EINVAL;
3392
3393         gfx_v9_0_cp_compute_enable(adev, false);
3394
3395         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3396         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3397
3398         fw_data = (const __le32 *)
3399                 (adev->gfx.mec_fw->data +
3400                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3401         tmp = 0;
3402         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3403         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3404         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3405
3406         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3407                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3408         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3409                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3410
3411         /* MEC1 */
3412         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3413                          mec_hdr->jt_offset);
3414         for (i = 0; i < mec_hdr->jt_size; i++)
3415                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3416                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3417
3418         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3419                         adev->gfx.mec_fw_version);
3420         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3421
3422         return 0;
3423 }
3424
3425 /* KIQ functions */
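/*
 * Tell the RLC which hardware queue is the KIQ: the low byte of
 * RLC_CP_SCHEDULERS encodes me/pipe/queue, and bit 7 is set in a
 * second write to activate the entry.
 */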
3426 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3427 {
3428         uint32_t tmp;
3429         struct amdgpu_device *adev = ring->adev;
3430
3431         /* tell RLC which is KIQ queue */
3432         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3433         tmp &= 0xffffff00;
3434         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3435         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3436         tmp |= 0x80;
3437         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3438 }
3439
3440 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3441 {
3442         struct amdgpu_device *adev = ring->adev;
3443
3444         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3445                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3446                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3447                         mqd->cp_hqd_queue_priority =
3448                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3449                 }
3450         }
3451 }
3452
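/*
 * Fill the v9 MQD from the ring's current state.  The MQD mirrors
 * the CP_HQD_* registers, so everything programmed here (EOP base,
 * doorbell, PQ base/control, writeback addresses) takes effect when
 * the queue is mapped onto a hardware slot.
 */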
3453 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3454 {
3455         struct amdgpu_device *adev = ring->adev;
3456         struct v9_mqd *mqd = ring->mqd_ptr;
3457         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3458         uint32_t tmp;
3459
3460         mqd->header = 0xC0310800;
3461         mqd->compute_pipelinestat_enable = 0x00000001;
3462         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3463         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3464         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3465         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3466         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3467         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3468         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3469         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3470         mqd->compute_misc_reserved = 0x00000003;
3471
3472         mqd->dynamic_cu_mask_addr_lo =
3473                 lower_32_bits(ring->mqd_gpu_addr
3474                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3475         mqd->dynamic_cu_mask_addr_hi =
3476                 upper_32_bits(ring->mqd_gpu_addr
3477                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3478
3479         eop_base_addr = ring->eop_gpu_addr >> 8;
3480         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3481         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3482
3483         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3484         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3485         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3486                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3487
3488         mqd->cp_hqd_eop_control = tmp;
3489
3490         /* enable doorbell? */
3491         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3492
3493         if (ring->use_doorbell) {
3494                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3495                                     DOORBELL_OFFSET, ring->doorbell_index);
3496                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3497                                     DOORBELL_EN, 1);
3498                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3499                                     DOORBELL_SOURCE, 0);
3500                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3501                                     DOORBELL_HIT, 0);
3502         } else {
3503                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3504                                          DOORBELL_EN, 0);
3505         }
3506
3507         mqd->cp_hqd_pq_doorbell_control = tmp;
3508
3509         /* disable the queue if it's active */
3510         ring->wptr = 0;
3511         mqd->cp_hqd_dequeue_request = 0;
3512         mqd->cp_hqd_pq_rptr = 0;
3513         mqd->cp_hqd_pq_wptr_lo = 0;
3514         mqd->cp_hqd_pq_wptr_hi = 0;
3515
3516         /* set the pointer to the MQD */
3517         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3518         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3519
3520         /* set MQD vmid to 0 */
3521         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3522         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3523         mqd->cp_mqd_control = tmp;
3524
3525         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3526         hqd_gpu_addr = ring->gpu_addr >> 8;
3527         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3528         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3529
3530         /* set up the HQD, this is similar to CP_RB0_CNTL */
3531         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3532         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3533                             (order_base_2(ring->ring_size / 4) - 1));
3534         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3535                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3536 #ifdef __BIG_ENDIAN
3537         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3538 #endif
3539         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3540         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3541         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3542         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3543         mqd->cp_hqd_pq_control = tmp;
3544
3545         /* set the wb address whether it's enabled or not */
3546         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3547         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3548         mqd->cp_hqd_pq_rptr_report_addr_hi =
3549                 upper_32_bits(wb_gpu_addr) & 0xffff;
3550
3551         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3552         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3553         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3554         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3555
3556         tmp = 0;
3557         /* enable the doorbell if requested */
3558         if (ring->use_doorbell) {
3559                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3560                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3561                                 DOORBELL_OFFSET, ring->doorbell_index);
3562
3563                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3564                                          DOORBELL_EN, 1);
3565                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3566                                          DOORBELL_SOURCE, 0);
3567                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3568                                          DOORBELL_HIT, 0);
3569         }
3570
3571         mqd->cp_hqd_pq_doorbell_control = tmp;
3572
3573         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3574         ring->wptr = 0;
3575         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3576
3577         /* set the vmid for the queue */
3578         mqd->cp_hqd_vmid = 0;
3579
3580         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3581         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3582         mqd->cp_hqd_persistent_state = tmp;
3583
3584         /* set MIN_IB_AVAIL_SIZE */
3585         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3586         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3587         mqd->cp_hqd_ib_control = tmp;
3588
3589         /* set static priority for a queue/ring */
3590         gfx_v9_0_mqd_set_priority(ring, mqd);
3591         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3592
3593         /* The map_queues packet activates the queue itself, so only
3594          * the KIQ needs to set this field here.
3595          */
3596         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3597                 mqd->cp_hqd_active = 1;
3598
3599         return 0;
3600 }
3601
3602 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3603 {
3604         struct amdgpu_device *adev = ring->adev;
3605         struct v9_mqd *mqd = ring->mqd_ptr;
3606         int j;
3607
3608         /* disable wptr polling */
3609         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3610
3611         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3612                mqd->cp_hqd_eop_base_addr_lo);
3613         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3614                mqd->cp_hqd_eop_base_addr_hi);
3615
3616         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3617         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3618                mqd->cp_hqd_eop_control);
3619
3620         /* enable doorbell? */
3621         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3622                mqd->cp_hqd_pq_doorbell_control);
3623
3624         /* disable the queue if it's active */
3625         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3626                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3627                 for (j = 0; j < adev->usec_timeout; j++) {
3628                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3629                                 break;
3630                         udelay(1);
3631                 }
3632                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3633                        mqd->cp_hqd_dequeue_request);
3634                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3635                        mqd->cp_hqd_pq_rptr);
3636                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3637                        mqd->cp_hqd_pq_wptr_lo);
3638                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3639                        mqd->cp_hqd_pq_wptr_hi);
3640         }
3641
3642         /* set the pointer to the MQD */
3643         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3644                mqd->cp_mqd_base_addr_lo);
3645         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3646                mqd->cp_mqd_base_addr_hi);
3647
3648         /* set MQD vmid to 0 */
3649         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3650                mqd->cp_mqd_control);
3651
3652         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3653         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3654                mqd->cp_hqd_pq_base_lo);
3655         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3656                mqd->cp_hqd_pq_base_hi);
3657
3658         /* set up the HQD, this is similar to CP_RB0_CNTL */
3659         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3660                mqd->cp_hqd_pq_control);
3661
3662         /* set the wb address whether it's enabled or not */
3663         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3664                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3665         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3666                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3667
3668         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3669         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3670                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3671         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3672                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3673
3674         /* enable the doorbell if requested */
3675         if (ring->use_doorbell) {
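                /*
                 * doorbell_index counts 64-bit doorbell slots; multiplying
                 * by 2 converts to dword units and the << 2 shift appears
                 * to produce the byte offset the MEC range registers expect.
                 */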
3676                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3677                                         (adev->doorbell_index.kiq * 2) << 2);
3678                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3679                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3680         }
3681
3682         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3683                mqd->cp_hqd_pq_doorbell_control);
3684
3685         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3686         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3687                mqd->cp_hqd_pq_wptr_lo);
3688         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3689                mqd->cp_hqd_pq_wptr_hi);
3690
3691         /* set the vmid for the queue */
3692         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3693
3694         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3695                mqd->cp_hqd_persistent_state);
3696
3697         /* activate the queue */
3698         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3699                mqd->cp_hqd_active);
3700
3701         if (ring->use_doorbell)
3702                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3703
3704         return 0;
3705 }
3706
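/*
 * Tear down the KIQ HQD: request a dequeue, wait for the queue to go
 * inactive (forcing it off if the request times out), then clear the
 * HQD registers so a later rebind starts from a clean state.
 */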
3707 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3708 {
3709         struct amdgpu_device *adev = ring->adev;
3710         int j;
3711
3712         /* disable the queue if it's active */
3713         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3714
3715                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3716
3717                 for (j = 0; j < adev->usec_timeout; j++) {
3718                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3719                                 break;
3720                         udelay(1);
3721                 }
3722
3723                 if (j == adev->usec_timeout) {
3724                         DRM_DEBUG("KIQ dequeue request failed.\n");
3725
3726                         /* Manual disable if dequeue request times out */
3727                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3728                 }
3729
3730                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3731                       0);
3732         }
3733
3734         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3735         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3736         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3737         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3738         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3739         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3740         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3741         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3742
3743         return 0;
3744 }
3745
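/*
 * The MQD backup array has one slot per compute ring plus a final slot
 * (index AMDGPU_MAX_COMPUTE_RINGS) reserved for the KIQ, which is the
 * index used here; the KCQs below use their own ring index.
 */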
3746 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3747 {
3748         struct amdgpu_device *adev = ring->adev;
3749         struct v9_mqd *mqd = ring->mqd_ptr;
3750         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3751         struct v9_mqd *tmp_mqd;
3752
3753         gfx_v9_0_kiq_setting(ring);
3754
3755         /* The GPU could be in a bad state during probe: the driver triggers
3756          * a reset after loading the SMU, and in that case the MQD has not
3757          * been initialized and must be re-initialized here. Check
3758          * mqd->cp_hqd_pq_control, since that value should never be 0.
3759          */
3760         tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3761         if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
3762                 /* for the GPU_RESET case, restore the MQD to a clean state */
3763                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3764                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3765
3766                 /* reset ring buffer */
3767                 ring->wptr = 0;
3768                 amdgpu_ring_clear_ring(ring);
3769
3770                 mutex_lock(&adev->srbm_mutex);
3771                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3772                 gfx_v9_0_kiq_init_register(ring);
3773                 soc15_grbm_select(adev, 0, 0, 0, 0);
3774                 mutex_unlock(&adev->srbm_mutex);
3775         } else {
3776                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3777                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3778                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3779                 mutex_lock(&adev->srbm_mutex);
3780                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3781                 gfx_v9_0_mqd_init(ring);
3782                 gfx_v9_0_kiq_init_register(ring);
3783                 soc15_grbm_select(adev, 0, 0, 0, 0);
3784                 mutex_unlock(&adev->srbm_mutex);
3785
3786                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3787                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3788         }
3789
3790         return 0;
3791 }
3792
3793 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3794 {
3795         struct amdgpu_device *adev = ring->adev;
3796         struct v9_mqd *mqd = ring->mqd_ptr;
3797         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3798         struct v9_mqd *tmp_mqd;
3799
3800         /* As with the KIQ init above, the driver needs to re-init the MQD
3801          * if mqd->cp_hqd_pq_control was never initialized.
3802          */
3803         tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3804
3805         if (!tmp_mqd->cp_hqd_pq_control ||
3806             (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
3807                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3808                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3809                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3810                 mutex_lock(&adev->srbm_mutex);
3811                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3812                 gfx_v9_0_mqd_init(ring);
3813                 soc15_grbm_select(adev, 0, 0, 0, 0);
3814                 mutex_unlock(&adev->srbm_mutex);
3815
3816                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3817                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3818         } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3819                 /* reset MQD to a clean status */
3820                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3821                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3822
3823                 /* reset ring buffer */
3824                 ring->wptr = 0;
3825                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3826                 amdgpu_ring_clear_ring(ring);
3827         } else {
3828                 amdgpu_ring_clear_ring(ring);
3829         }
3830
3831         return 0;
3832 }
3833
3834 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3835 {
3836         struct amdgpu_ring *ring;
3837         int r;
3838
3839         ring = &adev->gfx.kiq.ring;
3840
3841         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3842         if (unlikely(r != 0))
3843                 return r;
3844
3845         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3846         if (unlikely(r != 0))
3847                 return r;
3848
3849         gfx_v9_0_kiq_init_queue(ring);
3850         amdgpu_bo_kunmap(ring->mqd_obj);
3851         ring->mqd_ptr = NULL;
3852         amdgpu_bo_unreserve(ring->mqd_obj);
3853         ring->sched.ready = true;
3854         return 0;
3855 }
3856
3857 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3858 {
3859         struct amdgpu_ring *ring = NULL;
3860         int r = 0, i;
3861
3862         gfx_v9_0_cp_compute_enable(adev, true);
3863
3864         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3865                 ring = &adev->gfx.compute_ring[i];
3866
3867                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3868                 if (unlikely(r != 0))
3869                         goto done;
3870                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3871                 if (!r) {
3872                         r = gfx_v9_0_kcq_init_queue(ring);
3873                         amdgpu_bo_kunmap(ring->mqd_obj);
3874                         ring->mqd_ptr = NULL;
3875                 }
3876                 amdgpu_bo_unreserve(ring->mqd_obj);
3877                 if (r)
3878                         goto done;
3879         }
3880
3881         r = amdgpu_gfx_enable_kcq(adev);
3882 done:
3883         return r;
3884 }
3885
3886 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3887 {
3888         int r, i;
3889         struct amdgpu_ring *ring;
3890
3891         if (!(adev->flags & AMD_IS_APU))
3892                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3893
3894         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3895                 if (adev->gfx.num_gfx_rings) {
3896                         /* legacy firmware loading */
3897                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3898                         if (r)
3899                                 return r;
3900                 }
3901
3902                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3903                 if (r)
3904                         return r;
3905         }
3906
3907         r = gfx_v9_0_kiq_resume(adev);
3908         if (r)
3909                 return r;
3910
3911         if (adev->gfx.num_gfx_rings) {
3912                 r = gfx_v9_0_cp_gfx_resume(adev);
3913                 if (r)
3914                         return r;
3915         }
3916
3917         r = gfx_v9_0_kcq_resume(adev);
3918         if (r)
3919                 return r;
3920
3921         if (adev->gfx.num_gfx_rings) {
3922                 ring = &adev->gfx.gfx_ring[0];
3923                 r = amdgpu_ring_test_helper(ring);
3924                 if (r)
3925                         return r;
3926         }
3927
3928         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3929                 ring = &adev->gfx.compute_ring[i];
3930                 amdgpu_ring_test_helper(ring);
3931         }
3932
3933         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3934
3935         return 0;
3936 }
3937
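/*
 * Mirror the data-fabric address-hashing status into TCP_ADDR_CONFIG so
 * the texture cache decodes addresses with the same 64K/2M/1G hash
 * settings the DF uses (Arcturus and Aldebaran only).
 */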
3938 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3939 {
3940         u32 tmp;
3941
3942         if (adev->asic_type != CHIP_ARCTURUS &&
3943             adev->asic_type != CHIP_ALDEBARAN)
3944                 return;
3945
3946         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3947         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3948                                 adev->df.hash_status.hash_64k);
3949         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3950                                 adev->df.hash_status.hash_2m);
3951         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3952                                 adev->df.hash_status.hash_1g);
3953         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3954 }
3955
3956 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3957 {
3958         if (adev->gfx.num_gfx_rings)
3959                 gfx_v9_0_cp_gfx_enable(adev, enable);
3960         gfx_v9_0_cp_compute_enable(adev, enable);
3961 }
3962
3963 static int gfx_v9_0_hw_init(void *handle)
3964 {
3965         int r;
3966         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3967
3968         if (!amdgpu_sriov_vf(adev))
3969                 gfx_v9_0_init_golden_registers(adev);
3970
3971         gfx_v9_0_constants_init(adev);
3972
3973         gfx_v9_0_init_tcp_config(adev);
3974
3975         r = adev->gfx.rlc.funcs->resume(adev);
3976         if (r)
3977                 return r;
3978
3979         r = gfx_v9_0_cp_resume(adev);
3980         if (r)
3981                 return r;
3982
3983         if (adev->asic_type == CHIP_ALDEBARAN)
3984                 gfx_v9_4_2_set_power_brake_sequence(adev);
3985
3986         return r;
3987 }
3988
3989 static int gfx_v9_0_hw_fini(void *handle)
3990 {
3991         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3992
3993         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3994         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3995         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3996
3997         /* DF freeze and KCQ disable will fail once a RAS interrupt has triggered */
3998         if (!amdgpu_ras_intr_triggered())
3999                 /* disable the KCQs so the CPC stops touching memory that is no longer valid */
4000                 amdgpu_gfx_disable_kcq(adev);
4001
4002         if (amdgpu_sriov_vf(adev)) {
4003                 gfx_v9_0_cp_gfx_enable(adev, false);
4004                 /* Must disable polling for SRIOV once the hw is finished;
4005                  * otherwise the CPC engine may keep fetching a WB address
4006                  * that is already invalid after sw teardown and trigger a
4007                  * DMAR read error on the hypervisor side.
4008                  */
4009                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4010                 return 0;
4011         }
4012
4013         /* Use the deinitialization sequence from CAIL when unbinding the
4014          * device from the driver; otherwise the KIQ hangs when binding it back.
4015          */
4016         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4017                 mutex_lock(&adev->srbm_mutex);
4018                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
4019                                 adev->gfx.kiq.ring.pipe,
4020                                 adev->gfx.kiq.ring.queue, 0);
4021                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
4022                 soc15_grbm_select(adev, 0, 0, 0, 0);
4023                 mutex_unlock(&adev->srbm_mutex);
4024         }
4025
4026         gfx_v9_0_cp_enable(adev, false);
4027
4028         /* Skip the RLC halt during an A+A reset (GPU coherently connected to the CPU) */
4029         if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
4030                 dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
4031                 return 0;
4032         }
4033
4034         adev->gfx.rlc.funcs->stop(adev);
4035         return 0;
4036 }
4037
4038 static int gfx_v9_0_suspend(void *handle)
4039 {
4040         return gfx_v9_0_hw_fini(handle);
4041 }
4042
4043 static int gfx_v9_0_resume(void *handle)
4044 {
4045         return gfx_v9_0_hw_init(handle);
4046 }
4047
4048 static bool gfx_v9_0_is_idle(void *handle)
4049 {
4050         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4051
4052         if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4053                                 GRBM_STATUS, GUI_ACTIVE))
4054                 return false;
4055         else
4056                 return true;
4057 }
4058
4059 static int gfx_v9_0_wait_for_idle(void *handle)
4060 {
4061         unsigned i;
4062         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4063
4064         for (i = 0; i < adev->usec_timeout; i++) {
4065                 if (gfx_v9_0_is_idle(handle))
4066                         return 0;
4067                 udelay(1);
4068         }
4069         return -ETIMEDOUT;
4070 }
4071
4072 static int gfx_v9_0_soft_reset(void *handle)
4073 {
4074         u32 grbm_soft_reset = 0;
4075         u32 tmp;
4076         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4077
4078         /* GRBM_STATUS */
4079         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4080         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4081                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4082                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4083                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4084                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4085                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4086                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4087                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4088                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4089                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4090         }
4091
4092         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4093                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4094                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4095         }
4096
4097         /* GRBM_STATUS2 */
4098         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4099         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4100                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4101                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4102
4104         if (grbm_soft_reset) {
4105                 /* stop the rlc */
4106                 adev->gfx.rlc.funcs->stop(adev);
4107
4108                 if (adev->gfx.num_gfx_rings)
4109                         /* Disable GFX parsing/prefetching */
4110                         gfx_v9_0_cp_gfx_enable(adev, false);
4111
4112                 /* Disable MEC parsing/prefetching */
4113                 gfx_v9_0_cp_compute_enable(adev, false);
4114
4115                 /* assert the selected resets, hold briefly, then release */
4116                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4117                 tmp |= grbm_soft_reset;
4118                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4119                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4120                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4121
4122                 udelay(50);
4123
4124                 tmp &= ~grbm_soft_reset;
4125                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4126                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4128
4129                 /* Wait a little for things to settle down */
4130                 udelay(50);
4131         }
4132         return 0;
4133 }
4134
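/*
 * Read the 64-bit GPU clock counter via a KIQ COPY_DATA packet instead
 * of direct MMIO; used on Vega10 under SRIOV, where the RLC clock
 * capture registers are not reliably accessible from the guest.
 */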
4135 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4136 {
4137         signed long r, cnt = 0;
4138         unsigned long flags;
4139         uint32_t seq, reg_val_offs = 0;
4140         uint64_t value = 0;
4141         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4142         struct amdgpu_ring *ring = &kiq->ring;
4143
4144         BUG_ON(!ring->funcs->emit_rreg);
4145
4146         spin_lock_irqsave(&kiq->ring_lock, flags);
4147         if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4148                 pr_err("critical bug! too many kiq readers\n");
4149                 goto failed_unlock;
4150         }
4151         amdgpu_ring_alloc(ring, 32);
4152         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4153         amdgpu_ring_write(ring, 9 |     /* src: gpu clock count */
4154                                 (5 << 8) |      /* dst: memory */
4155                                 (1 << 16) |     /* count sel: 64 bits */
4156                                 (1 << 20));     /* write confirm */
4157         amdgpu_ring_write(ring, 0);
4158         amdgpu_ring_write(ring, 0);
4159         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4160                                 reg_val_offs * 4));
4161         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4162                                 reg_val_offs * 4));
4163         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4164         if (r)
4165                 goto failed_undo;
4166
4167         amdgpu_ring_commit(ring);
4168         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4169
4170         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4171
4172         /* Don't keep waiting in the GPU reset case, as that may block the
4173          * gpu_recover() routine forever: e.g. when this KIQ read is
4174          * triggered from TTM, ttm_bo_lock_delayed_workqueue() never
4175          * returns if we keep waiting here, which hangs gpu_recover().
4176          *
4177          * Also don't keep waiting when called from IRQ context.
4178          */
4180         if (r < 1 && (amdgpu_in_reset(adev)))
4181                 goto failed_kiq_read;
4182
4183         might_sleep();
4184         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4185                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4186                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4187         }
4188
4189         if (cnt > MAX_KIQ_REG_TRY)
4190                 goto failed_kiq_read;
4191
4192         mb();
4193         value = (uint64_t)adev->wb.wb[reg_val_offs] |
4194                 (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4195         amdgpu_device_wb_free(adev, reg_val_offs);
4196         return value;
4197
4198 failed_undo:
4199         amdgpu_ring_undo(ring);
4200 failed_unlock:
4201         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4202 failed_kiq_read:
4203         if (reg_val_offs)
4204                 amdgpu_device_wb_free(adev, reg_val_offs);
4205         pr_err("failed to read gpu clock\n");
4206         return ~0;
4207 }
4208
4209 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4210 {
4211         uint64_t clock;
4212
4213         amdgpu_gfx_off_ctrl(adev, false);
4214         mutex_lock(&adev->gfx.gpu_clock_mutex);
4215         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4216                 clock = gfx_v9_0_kiq_read_clock(adev);
4217         } else {
4218                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4219                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4220                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4221         }
4222         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4223         amdgpu_gfx_off_ctrl(adev, true);
4224         return clock;
4225 }
4226
4227 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4228                                           uint32_t vmid,
4229                                           uint32_t gds_base, uint32_t gds_size,
4230                                           uint32_t gws_base, uint32_t gws_size,
4231                                           uint32_t oa_base, uint32_t oa_size)
4232 {
4233         struct amdgpu_device *adev = ring->adev;
4234
4235         /* GDS Base */
4236         gfx_v9_0_write_data_to_reg(ring, 0, false,
4237                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4238                                    gds_base);
4239
4240         /* GDS Size */
4241         gfx_v9_0_write_data_to_reg(ring, 0, false,
4242                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4243                                    gds_size);
4244
4245         /* GWS */
4246         gfx_v9_0_write_data_to_reg(ring, 0, false,
4247                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4248                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4249
4250         /* OA */
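        /*
         * (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
         * mask of oa_size bits starting at bit oa_base, e.g. base 2 with
         * size 3 yields 0x1c.
         */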
4251         gfx_v9_0_write_data_to_reg(ring, 0, false,
4252                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4253                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
4254 }
4255
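/*
 * Pre-assembled GFX9 compute shader binaries for the EDC GPR
 * workarounds below; each writes known values across the GPR file so
 * that the ECC check bits become initialized.
 */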
4256 static const u32 vgpr_init_compute_shader[] =
4257 {
4258         0xb07c0000, 0xbe8000ff,
4259         0x000000f8, 0xbf110800,
4260         0x7e000280, 0x7e020280,
4261         0x7e040280, 0x7e060280,
4262         0x7e080280, 0x7e0a0280,
4263         0x7e0c0280, 0x7e0e0280,
4264         0x80808800, 0xbe803200,
4265         0xbf84fff5, 0xbf9c0000,
4266         0xd28c0001, 0x0001007f,
4267         0xd28d0001, 0x0002027e,
4268         0x10020288, 0xb8810904,
4269         0xb7814000, 0xd1196a01,
4270         0x00000301, 0xbe800087,
4271         0xbefc00c1, 0xd89c4000,
4272         0x00020201, 0xd89cc080,
4273         0x00040401, 0x320202ff,
4274         0x00000800, 0x80808100,
4275         0xbf84fff8, 0x7e020280,
4276         0xbf810000, 0x00000000,
4277 };
4278
4279 static const u32 sgpr_init_compute_shader[] =
4280 {
4281         0xb07c0000, 0xbe8000ff,
4282         0x0000005f, 0xbee50080,
4283         0xbe812c65, 0xbe822c65,
4284         0xbe832c65, 0xbe842c65,
4285         0xbe852c65, 0xb77c0005,
4286         0x80808500, 0xbf84fff8,
4287         0xbe800080, 0xbf810000,
4288 };
4289
4290 static const u32 vgpr_init_compute_shader_arcturus[] = {
4291         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4292         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4293         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4294         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4295         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4296         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4297         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4298         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4299         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4300         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4301         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4302         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4303         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4304         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4305         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4306         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4307         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4308         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4309         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4310         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4311         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4312         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4313         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4314         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4315         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4316         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4317         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4318         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4319         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4320         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4321         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4322         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4323         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4324         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4325         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4326         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4327         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4328         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4329         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4330         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4331         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4332         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4333         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4334         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4335         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4336         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4337         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4338         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4339         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4340         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4341         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4342         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4343         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4344         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4345         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4346         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4347         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4348         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4349         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4350         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4351         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4352         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4353         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4354         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4355         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4356         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4357         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4358         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4359         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4360         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4361         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4362         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4363         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4364         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4365         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4366         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4367         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4368         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4369         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4370         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4371         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4372         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4373         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4374         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4375         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4376         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4377         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4378         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4379         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4380         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4381         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4382         0xbf84fff8, 0xbf810000,
4383 };
4384
4385 /* When the register arrays below change, please update gpr_reg_size
4386  * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
4387  * so they cover all gfx9 ASICs. */
4388 static const struct soc15_reg_entry vgpr_init_regs[] = {
4389    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4390    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4391    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4392    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4393    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4394    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4395    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4396    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4397    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4398    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4399    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4400    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4401    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4402    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4403 };
4404
4405 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4406    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4407    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4408    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4409    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4410    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4411    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4412    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4413    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4414    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4415    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4416    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4417    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4418    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4419    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4420 };
4421
4422 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4423    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4424    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4425    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4426    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4427    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4428    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4429    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4430    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4431    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4432    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4433    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4434    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4435    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4436    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4437 };
4438
4439 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4440    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4441    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4442    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4443    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4444    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4445    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4446    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4447    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4448    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4449    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4450    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4451    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4452    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4453    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4454 };
4455
4456 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4457    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4458    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4459    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4460    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4461    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4462    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4463    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4464    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4465    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4466    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4467    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4468    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4469    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4470    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4471    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4472    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4473    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4474    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4475    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4476    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4477    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4478    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4479    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4480    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4481    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4482    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4483    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4484    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4485    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4486    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4487    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4488    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4489    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4490 };
4491
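/*
 * Scrub GDS with a CPDMA fill so that its ECC state is initialized
 * before RAS starts counting errors; only relevant on boards with GFX
 * RAS enabled.
 */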
4492 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4493 {
4494         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4495         int i, r;
4496
4497         /* only supported when RAS is enabled */
4498         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4499                 return 0;
4500
4501         r = amdgpu_ring_alloc(ring, 7);
4502         if (r) {
4503                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4504                         ring->name, r);
4505                 return r;
4506         }
4507
4508         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4509         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4510
4511         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4512         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4513                                 PACKET3_DMA_DATA_DST_SEL(1) |
4514                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4515                                 PACKET3_DMA_DATA_ENGINE(0)));
4516         amdgpu_ring_write(ring, 0);
4517         amdgpu_ring_write(ring, 0);
4518         amdgpu_ring_write(ring, 0);
4519         amdgpu_ring_write(ring, 0);
4520         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4521                                 adev->gds.gds_size);
4522
4523         amdgpu_ring_commit(ring);
4524
4525         for (i = 0; i < adev->usec_timeout; i++) {
4526                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4527                         break;
4528                 udelay(1);
4529         }
4530
4531         if (i >= adev->usec_timeout)
4532                 r = -ETIMEDOUT;
4533
4534         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4535
4536         return r;
4537 }
4538
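/*
 * Dispatch the VGPR and SGPR init shaders across all CUs so that every
 * GPR bank gets written once, initializing the ECC check bits;
 * otherwise the first real waves could report spurious SEC/DED errors.
 */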
4539 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4540 {
4541         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4542         struct amdgpu_ib ib;
4543         struct dma_fence *f = NULL;
4544         int r, i;
4545         unsigned total_size, vgpr_offset, sgpr_offset;
4546         u64 gpu_addr;
4547
4548         int compute_dim_x = adev->gfx.config.max_shader_engines *
4549                                                 adev->gfx.config.max_cu_per_sh *
4550                                                 adev->gfx.config.max_sh_per_se;
4551         int sgpr_work_group_size = 5;
4552         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4553         int vgpr_init_shader_size;
4554         const u32 *vgpr_init_shader_ptr;
4555         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4556
4557         /* only supported when RAS is enabled */
4558         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4559                 return 0;
4560
4561         /* bail if the compute ring is not ready */
4562         if (!ring->sched.ready)
4563                 return 0;
4564
4565         if (adev->asic_type == CHIP_ARCTURUS) {
4566                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4567                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4568                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4569         } else {
4570                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4571                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4572                 vgpr_init_regs_ptr = vgpr_init_regs;
4573         }
4574
4575         total_size =
4576                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4577         total_size +=
4578                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4579         total_size +=
4580                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4581         total_size = ALIGN(total_size, 256);
4582         vgpr_offset = total_size;
4583         total_size += ALIGN(vgpr_init_shader_size, 256);
4584         sgpr_offset = total_size;
4585         total_size += sizeof(sgpr_init_compute_shader);
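        /*
         * Resulting IB layout: three packet streams (the VGPR, SGPR1 and
         * SGPR2 dispatches) at the front, then the 256-byte-aligned VGPR
         * init shader at vgpr_offset, then the SGPR init shader at
         * sgpr_offset.
         */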
4586
4587         /* allocate an indirect buffer to put the commands in */
4588         memset(&ib, 0, sizeof(ib));
4589         r = amdgpu_ib_get(adev, NULL, total_size,
4590                                         AMDGPU_IB_POOL_DIRECT, &ib);
4591         if (r) {
4592                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4593                 return r;
4594         }
4595
4596         /* load the compute shaders */
4597         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4598                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4599
4600         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4601                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4602
4603         /* init the ib length to 0 */
4604         ib.length_dw = 0;
4605
4606         /* VGPR */
4607         /* write the register state for the compute dispatch */
4608         for (i = 0; i < gpr_reg_size; i++) {
4609                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4610                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4611                                                                 - PACKET3_SET_SH_REG_START;
4612                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4613         }
4614         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4615         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4616         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4617         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4618                                                         - PACKET3_SET_SH_REG_START;
4619         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4620         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4621
4622         /* write dispatch packet */
4623         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4624         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4625         ib.ptr[ib.length_dw++] = 1; /* y */
4626         ib.ptr[ib.length_dw++] = 1; /* z */
4627         ib.ptr[ib.length_dw++] =
4628                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4629
4630         /* write CS partial flush packet */
4631         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4632         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4633
4634         /* SGPR1 */
4635         /* write the register state for the compute dispatch */
4636         for (i = 0; i < gpr_reg_size; i++) {
4637                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4638                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4639                                                                 - PACKET3_SET_SH_REG_START;
4640                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4641         }
4642         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4643         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4644         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4645         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4646                                                         - PACKET3_SET_SH_REG_START;
4647         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4648         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4649
4650         /* write dispatch packet */
4651         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4652         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4653         ib.ptr[ib.length_dw++] = 1; /* y */
4654         ib.ptr[ib.length_dw++] = 1; /* z */
4655         ib.ptr[ib.length_dw++] =
4656                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4657
4658         /* write CS partial flush packet */
4659         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4660         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4661
4662         /* SGPR2 */
4663         /* write the register state for the compute dispatch */
4664         for (i = 0; i < gpr_reg_size; i++) {
4665                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4666                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4667                                                                 - PACKET3_SET_SH_REG_START;
4668                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4669         }
4670         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4671         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4672         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4673         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4674                                                         - PACKET3_SET_SH_REG_START;
4675         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4676         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4677
4678         /* write dispatch packet */
4679         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4680         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4681         ib.ptr[ib.length_dw++] = 1; /* y */
4682         ib.ptr[ib.length_dw++] = 1; /* z */
4683         ib.ptr[ib.length_dw++] =
4684                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4685
4686         /* write CS partial flush packet */
4687         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4688         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4689
4690         /* schedule the ib on the ring */
4691         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4692         if (r) {
4693                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4694                 goto fail;
4695         }
4696
4697         /* wait for the GPU to finish processing the IB */
4698         r = dma_fence_wait(f, false);
4699         if (r) {
4700                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4701                 goto fail;
4702         }
4703
4704 fail:
4705         amdgpu_ib_free(adev, &ib, NULL);
4706         dma_fence_put(f);
4707
4708         return r;
4709 }
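
/*
 * A sketch of the type-3 packet encoding the IB above relies on (as
 * defined in soc15d.h, roughly):
 *
 *   PACKET3(op, n) == (3 << 30) | ((op & 0xFF) << 8) | (n & 0x3FFF)
 *
 * where n is the body length in dwords minus one. So SET_SH_REG with
 * n == 2 carries three body dwords: the register offset relative to
 * PACKET3_SET_SH_REG_START, then the two values (here COMPUTE_PGM_LO/HI,
 * i.e. the shader GPU address shifted right by 8).
 */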
4710
4711 static int gfx_v9_0_early_init(void *handle)
4712 {
4713         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4714
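        /*
         * Arcturus and Aldebaran are compute-only parts with no graphics
         * queue, so no gfx rings are exposed for them.
         */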
4715         if (adev->asic_type == CHIP_ARCTURUS ||
4716             adev->asic_type == CHIP_ALDEBARAN)
4717                 adev->gfx.num_gfx_rings = 0;
4718         else
4719                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4720         adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4721                                           AMDGPU_MAX_COMPUTE_RINGS);
4722         gfx_v9_0_set_kiq_pm4_funcs(adev);
4723         gfx_v9_0_set_ring_funcs(adev);
4724         gfx_v9_0_set_irq_funcs(adev);
4725         gfx_v9_0_set_gds_init(adev);
4726         gfx_v9_0_set_rlc_funcs(adev);
4727
4728         return 0;
4729 }
4730
4731 static int gfx_v9_0_ecc_late_init(void *handle)
4732 {
4733         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4734         int r;
4735
4736         /*
4737          * Temporary workaround for an issue where CP firmware fails to
4738          * update the read pointer while CPDMA writes a clearing operation
4739          * to GDS during the suspend/resume sequence on several cards. So
4740          * just limit this operation to the cold-boot sequence.
4741          */
4742         if ((!adev->in_suspend) &&
4743             (adev->gds.gds_size)) {
4744                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4745                 if (r)
4746                         return r;
4747         }
4748
4749         /* requires IBs so do in late init after IB pool is initialized */
4750         if (adev->asic_type == CHIP_ALDEBARAN)
4751                 r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4752         else
4753                 r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4754
4755         if (r)
4756                 return r;
4757
4758         if (adev->gfx.ras_funcs &&
4759             adev->gfx.ras_funcs->ras_late_init) {
4760                 r = adev->gfx.ras_funcs->ras_late_init(adev);
4761                 if (r)
4762                         return r;
4763         }
4764
4765         if (adev->gfx.ras_funcs &&
4766             adev->gfx.ras_funcs->enable_watchdog_timer)
4767                 adev->gfx.ras_funcs->enable_watchdog_timer(adev);
4768
4769         return 0;
4770 }
4771
4772 static int gfx_v9_0_late_init(void *handle)
4773 {
4774         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4775         int r;
4776
4777         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4778         if (r)
4779                 return r;
4780
4781         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4782         if (r)
4783                 return r;
4784
4785         r = gfx_v9_0_ecc_late_init(handle);
4786         if (r)
4787                 return r;
4788
4789         return 0;
4790 }
4791
4792 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4793 {
4794         uint32_t rlc_setting;
4795
4796         /* if RLC is not enabled, do nothing */
4797         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4798         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4799                 return false;
4800
4801         return true;
4802 }
4803
4804 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4805 {
4806         uint32_t data;
4807         unsigned i;
4808
4809         data = RLC_SAFE_MODE__CMD_MASK;
4810         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4811         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4812
4813         /* wait for RLC_SAFE_MODE */
4814         for (i = 0; i < adev->usec_timeout; i++) {
4815                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4816                         break;
4817                 udelay(1);
4818         }
4819 }
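
/*
 * RLC safe-mode handshake as used above: entering writes CMD plus a
 * MESSAGE of 1 to mmRLC_SAFE_MODE and then polls until the RLC firmware
 * clears the CMD bit to acknowledge; leaving (below) writes CMD with
 * MESSAGE left at 0 and does not wait.
 */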
4820
4821 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4822 {
4823         uint32_t data;
4824
4825         data = RLC_SAFE_MODE__CMD_MASK;
4826         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4827 }
4828
4829 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4830                                                 bool enable)
4831 {
4832         amdgpu_gfx_rlc_enter_safe_mode(adev);
4833
4834         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4835                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4836                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4837                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4838         } else {
4839                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4840                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4841                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4842         }
4843
4844         amdgpu_gfx_rlc_exit_safe_mode(adev);
4845 }
4846
4847 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4848                                                 bool enable)
4849 {
4850         /* TODO: double check if we need to perform under safe mode */
4851         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4852
4853         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4854                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4855         else
4856                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4857
4858         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4859                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4860         else
4861                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4862
4863         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4864 }
4865
4866 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4867                                                       bool enable)
4868 {
4869         uint32_t data, def;
4870
4871         amdgpu_gfx_rlc_enter_safe_mode(adev);
4872
4873         /* It is disabled by HW by default */
4874         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4875                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4876                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4877
4878                 if (adev->asic_type != CHIP_VEGA12)
4879                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4880
4881                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4882                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4883                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4884
4885                 /* only for Vega10 & Raven1 */
4886                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4887
4888                 if (def != data)
4889                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4890
4891                 /* MGLS is a global flag to control all MGLS in GFX */
4892                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4893                         /* 2 - RLC memory Light sleep */
4894                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4895                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4896                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4897                                 if (def != data)
4898                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4899                         }
4900                         /* 3 - CP memory Light sleep */
4901                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4902                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4903                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4904                                 if (def != data)
4905                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4906                         }
4907                 }
4908         } else {
4909                 /* 1 - MGCG_OVERRIDE */
4910                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4911
4912                 if (adev->asic_type != CHIP_VEGA12)
4913                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4914
4915                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4916                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4917                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4918                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4919
4920                 if (def != data)
4921                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4922
4923                 /* 2 - disable MGLS in RLC */
4924                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4925                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4926                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4927                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4928                 }
4929
4930                 /* 3 - disable MGLS in CP */
4931                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4932                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4933                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4934                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4935                 }
4936         }
4937
4938         amdgpu_gfx_rlc_exit_safe_mode(adev);
4939 }
4940
4941 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4942                                            bool enable)
4943 {
4944         uint32_t data, def;
4945
4946         if (!adev->gfx.num_gfx_rings)
4947                 return;
4948
4949         amdgpu_gfx_rlc_enter_safe_mode(adev);
4950
4951         /* Enable 3D CGCG/CGLS */
4952         if (enable) {
4953                 /* write cmd to clear cgcg/cgls ov */
4954                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4955                 /* unset CGCG override */
4956                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4957                 /* update CGCG and CGLS override bits */
4958                 if (def != data)
4959                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4960
4961                 /* enable 3D CGCG FSM (0x0000363f) */
4962                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4963
4964                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4965                         data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4966                                 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4967                 else
4968                         data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
4969
4970                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4971                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4972                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4973                 if (def != data)
4974                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4975
4976                 /* set IDLE_POLL_COUNT(0x00900100) */
4977                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4978                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4979                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4980                 if (def != data)
4981                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4982         } else {
4983                 /* Disable CGCG/CGLS */
4984                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4985                 /* disable cgcg, cgls should be disabled */
4986                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4987                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4988                 /* disable cgcg and cgls in FSM */
4989                 if (def != data)
4990                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4991         }
4992
4993         amdgpu_gfx_rlc_exit_safe_mode(adev);
4994 }
4995
4996 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4997                                                       bool enable)
4998 {
4999         uint32_t def, data;
5000
5001         amdgpu_gfx_rlc_enter_safe_mode(adev);
5002
5003         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5004                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5005                 /* unset CGCG override */
5006                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5007                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5008                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5009                 else
5010                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5011                 /* update CGCG and CGLS override bits */
5012                 if (def != data)
5013                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5014
5015                 /* enable cgcg FSM (0x0000363F) */
5016                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5017
5018                 if (adev->asic_type == CHIP_ARCTURUS)
5019                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5020                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5021                 else
5022                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5023                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5024                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5025                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5026                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5027                 if (def != data)
5028                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5029
5030                 /* set IDLE_POLL_COUNT(0x00900100) */
5031                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5032                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5033                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5034                 if (def != data)
5035                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5036         } else {
5037                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5038                 /* reset CGCG/CGLS bits */
5039                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5040                 /* disable cgcg and cgls in FSM */
5041                 if (def != data)
5042                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5043         }
5044
5045         amdgpu_gfx_rlc_exit_safe_mode(adev);
5046 }
5047
5048 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5049                                             bool enable)
5050 {
5051         if (enable) {
5052                 /* CGCG/CGLS should be enabled after MGCG/MGLS
5053                  * ===  MGCG + MGLS ===
5054                  */
5055                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5056                 /* ===  CGCG /CGLS for GFX 3D Only === */
5057                 gfx_v9_0_update_3d_clock_gating(adev, enable);
5058                 /* ===  CGCG + CGLS === */
5059                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5060         } else {
5061                 /* CGCG/CGLS should be disabled before MGCG/MGLS
5062                  * ===  CGCG + CGLS ===
5063                  */
5064                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5065                 /* ===  CGCG /CGLS for GFX 3D Only === */
5066                 gfx_v9_0_update_3d_clock_gating(adev, enable);
5067                 /* ===  MGCG + MGLS === */
5068                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5069         }
5070         return 0;
5071 }
5072
5073 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5074 {
5075         u32 reg, data;
5076
5077         reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5078         if (amdgpu_sriov_is_pp_one_vf(adev))
5079                 data = RREG32_NO_KIQ(reg);
5080         else
5081                 data = RREG32(reg);
5082
5083         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5084         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5085
5086         if (amdgpu_sriov_is_pp_one_vf(adev))
5087                 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5088         else
5089                 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5090 }
5091
5092 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5093                                         uint32_t offset,
5094                                         struct soc15_reg_rlcg *entries, int arr_size)
5095 {
5096         int i;
5097         uint32_t reg;
5098
5099         if (!entries)
5100                 return false;
5101
5102         for (i = 0; i < arr_size; i++) {
5103                 const struct soc15_reg_rlcg *entry;
5104
5105                 entry = &entries[i];
5106                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5107                 if (offset == reg)
5108                         return true;
5109         }
5110
5111         return false;
5112 }
5113
5114 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5115 {
5116         return gfx_v9_0_check_rlcg_range(adev, offset,
5117                                         (void *)rlcg_access_gc_9_0,
5118                                         ARRAY_SIZE(rlcg_access_gc_9_0));
5119 }
5120
5121 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5122         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5123         .set_safe_mode = gfx_v9_0_set_safe_mode,
5124         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5125         .init = gfx_v9_0_rlc_init,
5126         .get_csb_size = gfx_v9_0_get_csb_size,
5127         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5128         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5129         .resume = gfx_v9_0_rlc_resume,
5130         .stop = gfx_v9_0_rlc_stop,
5131         .reset = gfx_v9_0_rlc_reset,
5132         .start = gfx_v9_0_rlc_start,
5133         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5134         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5135         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5136 };
5137
5138 static int gfx_v9_0_set_powergating_state(void *handle,
5139                                           enum amd_powergating_state state)
5140 {
5141         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5142         bool enable = (state == AMD_PG_STATE_GATE);
5143
5144         switch (adev->asic_type) {
5145         case CHIP_RAVEN:
5146         case CHIP_RENOIR:
5147                 if (!enable)
5148                         amdgpu_gfx_off_ctrl(adev, false);
5149
5150                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5151                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5152                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5153                 } else {
5154                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5155                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5156                 }
5157
5158                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5159                         gfx_v9_0_enable_cp_power_gating(adev, true);
5160                 else
5161                         gfx_v9_0_enable_cp_power_gating(adev, false);
5162
5163                 /* update gfx cgpg state */
5164                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5165
5166                 /* update mgcg state */
5167                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5168
5169                 if (enable)
5170                         amdgpu_gfx_off_ctrl(adev, true);
5171                 break;
5172         case CHIP_VEGA12:
5173                 amdgpu_gfx_off_ctrl(adev, enable);
5174                 break;
5175         default:
5176                 break;
5177         }
5178
5179         return 0;
5180 }
5181
5182 static int gfx_v9_0_set_clockgating_state(void *handle,
5183                                           enum amd_clockgating_state state)
5184 {
5185         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5186
5187         if (amdgpu_sriov_vf(adev))
5188                 return 0;
5189
5190         switch (adev->asic_type) {
5191         case CHIP_VEGA10:
5192         case CHIP_VEGA12:
5193         case CHIP_VEGA20:
5194         case CHIP_RAVEN:
5195         case CHIP_ARCTURUS:
5196         case CHIP_RENOIR:
5197         case CHIP_ALDEBARAN:
5198                 gfx_v9_0_update_gfx_clock_gating(adev,
5199                                                  state == AMD_CG_STATE_GATE);
5200                 break;
5201         default:
5202                 break;
5203         }
5204         return 0;
5205 }
5206
5207 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5208 {
5209         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5210         int data;
5211
5212         if (amdgpu_sriov_vf(adev))
5213                 *flags = 0;
5214
5215         /* AMD_CG_SUPPORT_GFX_MGCG */
5216         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5217         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5218                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5219
5220         /* AMD_CG_SUPPORT_GFX_CGCG */
5221         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5222         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5223                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5224
5225         /* AMD_CG_SUPPORT_GFX_CGLS */
5226         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5227                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5228
5229         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5230         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5231         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5232                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5233
5234         /* AMD_CG_SUPPORT_GFX_CP_LS */
5235         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5236         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5237                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5238
5239         if (adev->asic_type != CHIP_ARCTURUS) {
5240                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5241                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5242                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5243                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5244
5245                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5246                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5247                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5248         }
5249 }
5250
5251 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5252 {
5253         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
5254 }
5255
5256 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5257 {
5258         struct amdgpu_device *adev = ring->adev;
5259         u64 wptr;
5260
5261         /* XXX check if swapping is necessary on BE */
5262         if (ring->use_doorbell) {
5263                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5264         } else {
5265                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5266                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5267         }
5268
5269         return wptr;
5270 }
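
/*
 * Note on the read above: doorbell-backed rings keep a 64-bit shadow of
 * the write pointer in the writeback (WB) buffer, read atomically here;
 * rings without a doorbell fall back to the CP_RB0_WPTR/_HI register
 * pair instead.
 */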
5271
5272 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5273 {
5274         struct amdgpu_device *adev = ring->adev;
5275
5276         if (ring->use_doorbell) {
5277                 /* XXX check if swapping is necessary on BE */
5278                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5279                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5280         } else {
5281                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5282                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5283         }
5284 }
5285
5286 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5287 {
5288         struct amdgpu_device *adev = ring->adev;
5289         u32 ref_and_mask, reg_mem_engine;
5290         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5291
5292         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5293                 switch (ring->me) {
5294                 case 1:
5295                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5296                         break;
5297                 case 2:
5298                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5299                         break;
5300                 default:
5301                         return;
5302                 }
5303                 reg_mem_engine = 0;
5304         } else {
5305                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5306                 reg_mem_engine = 1; /* pfp */
5307         }
5308
5309         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5310                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5311                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5312                               ref_and_mask, ref_and_mask, 0x20);
5313 }
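
/*
 * Example of the ref_and_mask selection above: the shifts suggest the
 * NBIO flush-done register carries one bit per CP client, with
 * ref_and_mask_cp2/_cp6 being the bits for MEC1/MEC2 pipe 0, so a
 * compute ring on MEC1 (me == 1), pipe 2 waits on ref_and_mask_cp2 << 2;
 * gfx rings always use the cp0 bit and poll from the PFP engine.
 */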
5314
5315 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5316                                         struct amdgpu_job *job,
5317                                         struct amdgpu_ib *ib,
5318                                         uint32_t flags)
5319 {
5320         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5321         u32 header, control = 0;
5322
5323         if (ib->flags & AMDGPU_IB_FLAG_CE)
5324                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5325         else
5326                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5327
5328         control |= ib->length_dw | (vmid << 24);
5329
5330         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5331                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5332
5333                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5334                         gfx_v9_0_ring_emit_de_meta(ring);
5335         }
5336
5337         amdgpu_ring_write(ring, header);
5338         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5339         amdgpu_ring_write(ring,
5340 #ifdef __BIG_ENDIAN
5341                 (2 << 0) |
5342 #endif
5343                 lower_32_bits(ib->gpu_addr));
5344         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5345         amdgpu_ring_write(ring, control);
5346 }
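
/*
 * The INDIRECT_BUFFER packet emitted above is four dwords: the type-3
 * header, the IB address low dword (bits 1:0 carry a swap code, set to
 * 2 on big-endian builds), the address high dword, and a control word
 * packing the IB length in dwords with the VMID in bits 31:24.
 */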
5347
5348 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5349                                           struct amdgpu_job *job,
5350                                           struct amdgpu_ib *ib,
5351                                           uint32_t flags)
5352 {
5353         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5354         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5355
5356         /* Currently, there is a high possibility to get wave ID mismatch
5357          * between ME and GDS, leading to a hw deadlock, because ME generates
5358          * different wave IDs than the GDS expects. This situation happens
5359          * randomly when at least 5 compute pipes use GDS ordered append.
5360          * The wave IDs generated by ME are also wrong after suspend/resume.
5361          * Those are probably bugs somewhere else in the kernel driver.
5362          *
5363          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5364          * GDS to 0 for this ring (me/pipe).
5365          */
5366         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5367                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5368                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5369                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5370         }
5371
5372         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5373         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5374         amdgpu_ring_write(ring,
5375 #ifdef __BIG_ENDIAN
5376                                 (2 << 0) |
5377 #endif
5378                                 lower_32_bits(ib->gpu_addr));
5379         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5380         amdgpu_ring_write(ring, control);
5381 }
5382
5383 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5384                                      u64 seq, unsigned flags)
5385 {
5386         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5387         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5388         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5389
5390         /* RELEASE_MEM - flush caches, send int */
5391         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5392         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5393                                                EOP_TC_NC_ACTION_EN) :
5394                                               (EOP_TCL1_ACTION_EN |
5395                                                EOP_TC_ACTION_EN |
5396                                                EOP_TC_WB_ACTION_EN |
5397                                                EOP_TC_MD_ACTION_EN)) |
5398                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5399                                  EVENT_INDEX(5)));
5400         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5401
5402         /*
5403          * the address must be Qword aligned for a 64-bit write, Dword
5404          * aligned if only the low 32 bits are sent (high half dropped)
5405          */
5406         if (write64bit)
5407                 BUG_ON(addr & 0x7);
5408         else
5409                 BUG_ON(addr & 0x3);
5410         amdgpu_ring_write(ring, lower_32_bits(addr));
5411         amdgpu_ring_write(ring, upper_32_bits(addr));
5412         amdgpu_ring_write(ring, lower_32_bits(seq));
5413         amdgpu_ring_write(ring, upper_32_bits(seq));
5414         amdgpu_ring_write(ring, 0);
5415 }
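
/*
 * The RELEASE_MEM body above is seven dwords: the EOP event/cache-flush
 * control, a select word (DATA_SEL 2 writes the 64-bit seq, 1 only the
 * low half; INT_SEL 2 also raises an interrupt once the write lands),
 * the destination address, the sequence value, and a trailing zero.
 */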
5416
5417 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5418 {
5419         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5420         uint32_t seq = ring->fence_drv.sync_seq;
5421         uint64_t addr = ring->fence_drv.gpu_addr;
5422
5423         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5424                               lower_32_bits(addr), upper_32_bits(addr),
5425                               seq, 0xffffffff, 4);
5426 }
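
/*
 * The pipeline sync above is a WAIT_REG_MEM on the ring's own fence
 * address: the front end (PFP on gfx rings, ME on compute) stalls until
 * the fence value in memory reaches sync_seq under a full 0xffffffff
 * mask, so later packets cannot pass earlier submissions.
 */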
5427
5428 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5429                                         unsigned vmid, uint64_t pd_addr)
5430 {
5431         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5432
5433         /* compute doesn't have PFP */
5434         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5435                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5436                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5437                 amdgpu_ring_write(ring, 0x0);
5438         }
5439 }
5440
5441 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5442 {
5443         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5444 }
5445
5446 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5447 {
5448         u64 wptr;
5449
5450         /* XXX check if swapping is necessary on BE */
5451         if (ring->use_doorbell)
5452                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5453         else
5454                 BUG();
5455         return wptr;
5456 }
5457
5458 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5459 {
5460         struct amdgpu_device *adev = ring->adev;
5461
5462         /* XXX check if swapping is necessary on BE */
5463         if (ring->use_doorbell) {
5464                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5465                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5466         } else {
5467                 BUG(); /* only DOORBELL method supported on gfx9 now */
5468         }
5469 }
5470
5471 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5472                                          u64 seq, unsigned int flags)
5473 {
5474         struct amdgpu_device *adev = ring->adev;
5475
5476         /* we only allocate 32 bits for each seq writeback address */
5477         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5478
5479         /* write fence seq to the "addr" */
5480         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5481         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5482                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5483         amdgpu_ring_write(ring, lower_32_bits(addr));
5484         amdgpu_ring_write(ring, upper_32_bits(addr));
5485         amdgpu_ring_write(ring, lower_32_bits(seq));
5486
5487         if (flags & AMDGPU_FENCE_FLAG_INT) {
5488                 /* set register to trigger INT */
5489                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5490                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5491                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5492                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5493                 amdgpu_ring_write(ring, 0);
5494                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5495         }
5496 }
5497
5498 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5499 {
5500         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5501         amdgpu_ring_write(ring, 0);
5502 }
5503
5504 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5505 {
5506         struct v9_ce_ib_state ce_payload = {0};
5507         uint64_t csa_addr;
5508         int cnt;
5509
5510         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5511         csa_addr = amdgpu_csa_vaddr(ring->adev);
5512
5513         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5514         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5515                                  WRITE_DATA_DST_SEL(8) |
5516                                  WR_CONFIRM) |
5517                                  WRITE_DATA_CACHE_POLICY(0));
5518         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5519         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5520         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5521 }
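
/*
 * On the dword count above: the type-3 count field is the body length
 * in dwords minus one, and WRITE_DATA prepends one control dword plus a
 * two-dword destination address to the payload, hence
 * (sizeof(payload) >> 2) + 4 - 2.
 */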
5522
5523 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5524 {
5525         struct v9_de_ib_state de_payload = {0};
5526         uint64_t csa_addr, gds_addr;
5527         int cnt;
5528
5529         csa_addr = amdgpu_csa_vaddr(ring->adev);
5530         gds_addr = csa_addr + 4096;
5531         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5532         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5533
5534         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5535         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5536         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5537                                  WRITE_DATA_DST_SEL(8) |
5538                                  WR_CONFIRM) |
5539                                  WRITE_DATA_CACHE_POLICY(0));
5540         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5541         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5542         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5543 }
5544
5545 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5546                                    bool secure)
5547 {
5548         uint32_t v = secure ? FRAME_TMZ : 0;
5549
5550         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5551         amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5552 }
5553
5554 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5555 {
5556         uint32_t dw2 = 0;
5557
5558         if (amdgpu_sriov_vf(ring->adev))
5559                 gfx_v9_0_ring_emit_ce_meta(ring);
5560
5561         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5562         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5563                 /* set load_global_config & load_global_uconfig */
5564                 dw2 |= 0x8001;
5565                 /* set load_cs_sh_regs */
5566                 dw2 |= 0x01000000;
5567                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5568                 dw2 |= 0x10002;
5569
5570                 /* set load_ce_ram if preamble presented */
5571                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5572                         dw2 |= 0x10000000;
5573         } else {
5574                 /* still load_ce_ram if this is the first time the preamble
5575                  * is presented, even though no context switch happens.
5576                  */
5577                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5578                         dw2 |= 0x10000000;
5579         }
5580
5581         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5582         amdgpu_ring_write(ring, dw2);
5583         amdgpu_ring_write(ring, 0);
5584 }
5585
5586 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5587 {
5588         unsigned ret;
5589         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5590         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5591         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5592         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5593         ret = ring->wptr & ring->buf_mask;
5594         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5595         return ret;
5596 }
5597
5598 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5599 {
5600         unsigned cur;
5601         BUG_ON(offset > ring->buf_mask);
5602         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5603
5604         cur = (ring->wptr & ring->buf_mask) - 1;
5605         if (likely(cur > offset))
5606                 ring->ring[offset] = cur - offset;
5607         else
5608                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5609 }
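
/*
 * The COND_EXEC pair above works as reserve-then-patch: init emits the
 * packet with a dummy count (0x55aa55aa) and returns that dword's ring
 * offset; once the conditional packets have been written, patch stores
 * the real number of dwords to skip when *cond_exe_gpu_addr == 0,
 * adding the ring size when the write pointer has wrapped around.
 */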
5610
5611 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5612                                     uint32_t reg_val_offs)
5613 {
5614         struct amdgpu_device *adev = ring->adev;
5615
5616         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5617         amdgpu_ring_write(ring, 0 |     /* src: register*/
5618                                 (5 << 8) |      /* dst: memory */
5619                                 (1 << 20));     /* write confirm */
5620         amdgpu_ring_write(ring, reg);
5621         amdgpu_ring_write(ring, 0);
5622         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5623                                 reg_val_offs * 4));
5624         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5625                                 reg_val_offs * 4));
5626 }
5627
5628 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5629                                     uint32_t val)
5630 {
5631         uint32_t cmd = 0;
5632
5633         switch (ring->funcs->type) {
5634         case AMDGPU_RING_TYPE_GFX:
5635                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5636                 break;
5637         case AMDGPU_RING_TYPE_KIQ:
5638                 cmd = (1 << 16); /* no inc addr */
5639                 break;
5640         default:
5641                 cmd = WR_CONFIRM;
5642                 break;
5643         }
5644         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5645         amdgpu_ring_write(ring, cmd);
5646         amdgpu_ring_write(ring, reg);
5647         amdgpu_ring_write(ring, 0);
5648         amdgpu_ring_write(ring, val);
5649 }
5650
5651 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5652                                         uint32_t val, uint32_t mask)
5653 {
5654         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5655 }
5656
5657 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5658                                                   uint32_t reg0, uint32_t reg1,
5659                                                   uint32_t ref, uint32_t mask)
5660 {
5661         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5662         struct amdgpu_device *adev = ring->adev;
5663         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5664                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5665
5666         if (fw_version_ok)
5667                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5668                                       ref, mask, 0x20);
5669         else
5670                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5671                                                            ref, mask);
5672 }
5673
5674 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5675 {
5676         struct amdgpu_device *adev = ring->adev;
5677         uint32_t value = 0;
5678
5679         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5680         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5681         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5682         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5683         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5684 }
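
/*
 * Best-effort soft recovery: the SQ_CMD write above appears to
 * broadcast a wave kill (CMD 0x03 with MODE 0x01) gated by CHECK_VMID
 * so that only waves belonging to the hung job's VMID are targeted.
 */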
5685
5686 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5687                                                  enum amdgpu_interrupt_state state)
5688 {
5689         switch (state) {
5690         case AMDGPU_IRQ_STATE_DISABLE:
5691         case AMDGPU_IRQ_STATE_ENABLE:
5692                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5693                                TIME_STAMP_INT_ENABLE,
5694                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5695                 break;
5696         default:
5697                 break;
5698         }
5699 }
5700
5701 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5702                                                      int me, int pipe,
5703                                                      enum amdgpu_interrupt_state state)
5704 {
5705         u32 mec_int_cntl, mec_int_cntl_reg;
5706
5707         /*
5708          * amdgpu controls only the first MEC. That's why this function only
5709          * handles the setting of interrupts for this specific MEC. All other
5710          * pipes' interrupts are set by amdkfd.
5711          */
5712
5713         if (me == 1) {
5714                 switch (pipe) {
5715                 case 0:
5716                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5717                         break;
5718                 case 1:
5719                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5720                         break;
5721                 case 2:
5722                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5723                         break;
5724                 case 3:
5725                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5726                         break;
5727                 default:
5728                         DRM_DEBUG("invalid pipe %d\n", pipe);
5729                         return;
5730                 }
5731         } else {
5732                 DRM_DEBUG("invalid me %d\n", me);
5733                 return;
5734         }
5735
5736         switch (state) {
5737         case AMDGPU_IRQ_STATE_DISABLE:
5738                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5739                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5740                                              TIME_STAMP_INT_ENABLE, 0);
5741                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5742                 break;
5743         case AMDGPU_IRQ_STATE_ENABLE:
5744                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5745                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5746                                              TIME_STAMP_INT_ENABLE, 1);
5747                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5748                 break;
5749         default:
5750                 break;
5751         }
5752 }
5753
5754 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5755                                              struct amdgpu_irq_src *source,
5756                                              unsigned type,
5757                                              enum amdgpu_interrupt_state state)
5758 {
5759         switch (state) {
5760         case AMDGPU_IRQ_STATE_DISABLE:
5761         case AMDGPU_IRQ_STATE_ENABLE:
5762                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5763                                PRIV_REG_INT_ENABLE,
5764                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5765                 break;
5766         default:
5767                 break;
5768         }
5769
5770         return 0;
5771 }
5772
5773 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5774                                               struct amdgpu_irq_src *source,
5775                                               unsigned type,
5776                                               enum amdgpu_interrupt_state state)
5777 {
5778         switch (state) {
5779         case AMDGPU_IRQ_STATE_DISABLE:
5780         case AMDGPU_IRQ_STATE_ENABLE:
5781                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5782                                PRIV_INSTR_INT_ENABLE,
5783                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5784                 break;
5785         default:
5786                 break;
5787         }
5788
5789         return 0;
5790 }
5791
5792 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5793         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5794                         CP_ECC_ERROR_INT_ENABLE, 1)
5795
5796 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5797         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5798                         CP_ECC_ERROR_INT_ENABLE, 0)
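
/*
 * Both macros token-paste the register name: for example,
 * ENABLE_ECC_ON_ME_PIPE(1, 0) expands to a WREG32_FIELD15() on
 * mmCP_ME1_PIPE0_INT_CNTL setting CP_ECC_ERROR_INT_ENABLE to 1.
 */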
5799
5800 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5801                                               struct amdgpu_irq_src *source,
5802                                               unsigned type,
5803                                               enum amdgpu_interrupt_state state)
5804 {
5805         switch (state) {
5806         case AMDGPU_IRQ_STATE_DISABLE:
5807                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5808                                 CP_ECC_ERROR_INT_ENABLE, 0);
5809                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5810                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5811                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5812                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5813                 break;
5814
5815         case AMDGPU_IRQ_STATE_ENABLE:
5816                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5817                                 CP_ECC_ERROR_INT_ENABLE, 1);
5818                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5819                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5820                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5821                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5822                 break;
5823         default:
5824                 break;
5825         }
5826
5827         return 0;
5828 }
5829
5830
5831 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5832                                             struct amdgpu_irq_src *src,
5833                                             unsigned type,
5834                                             enum amdgpu_interrupt_state state)
5835 {
5836         switch (type) {
5837         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5838                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5839                 break;
5840         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5841                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5842                 break;
5843         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5844                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5845                 break;
5846         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5847                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5848                 break;
5849         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5850                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5851                 break;
5852         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5853                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5854                 break;
5855         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5856                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5857                 break;
5858         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5859                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5860                 break;
5861         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5862                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5863                 break;
5864         default:
5865                 break;
5866         }
5867         return 0;
5868 }
5869
5870 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5871                             struct amdgpu_irq_src *source,
5872                             struct amdgpu_iv_entry *entry)
5873 {
5874         int i;
5875         u8 me_id, pipe_id, queue_id;
5876         struct amdgpu_ring *ring;
5877
5878         DRM_DEBUG("IH: CP EOP\n");
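        /* ring_id layout: queue in bits 6:4, me in bits 3:2, pipe in bits 1:0 */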
5879         me_id = (entry->ring_id & 0x0c) >> 2;
5880         pipe_id = (entry->ring_id & 0x03) >> 0;
5881         queue_id = (entry->ring_id & 0x70) >> 4;
5882
5883         switch (me_id) {
5884         case 0:
5885                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5886                 break;
5887         case 1:
5888         case 2:
5889                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5890                         ring = &adev->gfx.compute_ring[i];
5891                         /* Per-queue interrupt is supported for MEC starting from VI.
5892                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
5893                          */
5894                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5895                                 amdgpu_fence_process(ring);
5896                 }
5897                 break;
5898         }
5899         return 0;
5900 }
5901
5902 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5903                            struct amdgpu_iv_entry *entry)
5904 {
5905         u8 me_id, pipe_id, queue_id;
5906         struct amdgpu_ring *ring;
5907         int i;
5908
5909         me_id = (entry->ring_id & 0x0c) >> 2;
5910         pipe_id = (entry->ring_id & 0x03) >> 0;
5911         queue_id = (entry->ring_id & 0x70) >> 4;
5912
5913         switch (me_id) {
5914         case 0:
5915                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5916                 break;
5917         case 1:
5918         case 2:
5919                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5920                         ring = &adev->gfx.compute_ring[i];
5921                         if (ring->me == me_id && ring->pipe == pipe_id &&
5922                             ring->queue == queue_id)
5923                                 drm_sched_fault(&ring->sched);
5924                 }
5925                 break;
5926         }
5927 }
5928
5929 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5930                                  struct amdgpu_irq_src *source,
5931                                  struct amdgpu_iv_entry *entry)
5932 {
5933         DRM_ERROR("Illegal register access in command stream\n");
5934         gfx_v9_0_fault(adev, entry);
5935         return 0;
5936 }
5937
5938 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5939                                   struct amdgpu_irq_src *source,
5940                                   struct amdgpu_iv_entry *entry)
5941 {
5942         DRM_ERROR("Illegal instruction in command stream\n");
5943         gfx_v9_0_fault(adev, entry);
5944         return 0;
5945 }
5946
5947
5948 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5949         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5950           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5951           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5952         },
5953         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5954           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5955           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5956         },
5957         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5958           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5959           0, 0
5960         },
5961         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5962           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5963           0, 0
5964         },
5965         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5966           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5967           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5968         },
5969         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5970           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5971           0, 0
5972         },
5973         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5974           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5975           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5976         },
5977         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5978           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5979           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5980         },
5981         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5982           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5983           0, 0
5984         },
5985         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5986           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5987           0, 0
5988         },
5989         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5990           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5991           0, 0
5992         },
5993         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5994           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5995           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5996         },
5997         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5998           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5999           0, 0
6000         },
6001         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6002           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6003           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6004         },
6005         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6006           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6007           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6008           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6009         },
6010         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6011           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6012           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6013           0, 0
6014         },
6015         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6016           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6017           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6018           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6019         },
6020         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6021           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6022           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6023           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6024         },
6025         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6026           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6027           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6028           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6029         },
6030         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6031           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6032           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6033           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6034         },
6035         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6036           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6037           0, 0
6038         },
6039         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6040           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6041           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6042         },
6043         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6044           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6045           0, 0
6046         },
6047         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6048           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6049           0, 0
6050         },
6051         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6052           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6053           0, 0
6054         },
6055         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6056           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6057           0, 0
6058         },
6059         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6060           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6061           0, 0
6062         },
6063         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6064           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6065           0, 0
6066         },
6067         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6068           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6069           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6070         },
6071         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6072           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6073           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6074         },
6075         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6076           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6077           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6078         },
6079         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6080           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6081           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6082         },
6083         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6084           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6085           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6086         },
6087         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6088           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6089           0, 0
6090         },
6091         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6092           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6093           0, 0
6094         },
6095         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6096           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6097           0, 0
6098         },
6099         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6100           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6101           0, 0
6102         },
6103         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6104           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6105           0, 0
6106         },
6107         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6108           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6109           0, 0
6110         },
6111         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6112           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6113           0, 0
6114         },
6115         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6116           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6117           0, 0
6118         },
6119         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6120           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6121           0, 0
6122         },
6123         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6124           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6125           0, 0
6126         },
6127         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6128           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6129           0, 0
6130         },
6131         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6132           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6133           0, 0
6134         },
6135         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6136           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6137           0, 0
6138         },
6139         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6140           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6141           0, 0
6142         },
6143         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6144           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6145           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6146         },
6147         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6148           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6149           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6150         },
6151         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6152           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6153           0, 0
6154         },
6155         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6156           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6157           0, 0
6158         },
6159         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6160           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6161           0, 0
6162         },
6163         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6164           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6165           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6166         },
6167         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6168           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6169           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6170         },
6171         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6172           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6173           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6174         },
6175         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6176           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6177           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6178         },
6179         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6180           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6181           0, 0
6182         },
6183         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6184           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6185           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6186         },
6187         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6188           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6189           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6190         },
6191         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6192           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6193           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6194         },
6195         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6196           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6197           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6198         },
6199         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6200           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6201           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6202         },
6203         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6204           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6205           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6206         },
6207         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6208           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6209           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6210         },
6211         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6212           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6213           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6214         },
6215         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6216           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6217           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6218         },
6219         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6220           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6221           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6222         },
6223         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6224           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6225           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6226         },
6227         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6228           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6229           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6230         },
6231         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6232           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6233           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6234         },
6235         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6236           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6237           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6238         },
6239         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6240           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6241           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6242         },
6243         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6244           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6245           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6246         },
6247         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6248           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6249           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6250         },
6251         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6252           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6253           0, 0
6254         },
6255         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6256           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6257           0, 0
6258         },
6259         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6260           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6261           0, 0
6262         },
6263         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6264           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6265           0, 0
6266         },
6267         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6268           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6269           0, 0
6270         },
6271         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6272           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6273           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6274         },
6275         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6276           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6277           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6278         },
6279         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6280           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6281           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6282         },
6283         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6284           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6285           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6286         },
6287         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6288           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6289           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6290         },
6291         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6292           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6293           0, 0
6294         },
6295         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6296           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6297           0, 0
6298         },
6299         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6300           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6301           0, 0
6302         },
6303         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6304           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6305           0, 0
6306         },
6307         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6308           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6309           0, 0
6310         },
6311         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6312           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6313           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6314         },
6315         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6316           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6317           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6318         },
6319         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6320           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6321           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6322         },
6323         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6324           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6325           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6326         },
6327         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6328           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6329           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6330         },
6331         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6332           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6333           0, 0
6334         },
6335         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6336           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6337           0, 0
6338         },
6339         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6340           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6341           0, 0
6342         },
6343         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6344           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6345           0, 0
6346         },
6347         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6348           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6349           0, 0
6350         },
6351         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6352           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6353           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6354         },
6355         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6356           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6357           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6358         },
6359         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6360           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6361           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6362         },
6363         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6364           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6365           0, 0
6366         },
6367         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6368           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6369           0, 0
6370         },
6371         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6372           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6373           0, 0
6374         },
6375         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6376           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6377           0, 0
6378         },
6379         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6380           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6381           0, 0
6382         },
6383         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6384           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6385           0, 0
6386         }
6387 };
6388
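/*
 * How the table above is consumed (see gfx_v9_0_ras_error_count() below):
 * SOC15_REG_FIELD() expands to a field's <FIELD>_MASK/<FIELD>__SHIFT pair, so
 * pulling a counter out of a raw register value is a mask-and-shift, e.g.:
 *
 *     sec_cnt = (value & field->sec_count_mask) >> field->sec_count_shift;
 *
 * Entries ending in "0, 0" describe parity-only (SED) memories: their
 * single-error count is reported through the SEC slot and they have no DED
 * counter.
 */
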
6389 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6390                                      void *inject_if)
6391 {
6392         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6393         int ret;
6394         struct ta_ras_trigger_error_input block_info = { 0 };
6395
6396         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6397                 return -EINVAL;
6398
6399         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6400                 return -EINVAL;
6401
6402         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6403                 return -EPERM;
6404
6405         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6406               info->head.type)) {
6407                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6408                         ras_gfx_subblocks[info->head.sub_block_index].name,
6409                         info->head.type);
6410                 return -EPERM;
6411         }
6412
6413         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6414               info->head.type)) {
6415                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6416                         ras_gfx_subblocks[info->head.sub_block_index].name,
6417                         info->head.type);
6418                 return -EPERM;
6419         }
6420
6421         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6422         block_info.sub_block_index =
6423                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6424         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6425         block_info.address = info->address;
6426         block_info.value = info->value;
6427
6428         mutex_lock(&adev->grbm_idx_mutex);
6429         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6430         mutex_unlock(&adev->grbm_idx_mutex);
6431
6432         return ret;
6433 }
6434
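/*
 * This injection path is normally reached from the amdgpu RAS debugfs
 * control interface; the block_info built above is handed to
 * psp_ras_trigger_error(), which forwards the request to the RAS trusted
 * application running on the PSP.
 */
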
6435 static const char *vml2_mems[] = {
6436         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6437         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6438         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6439         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6440         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6441         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6442         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6443         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6444         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6445         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6446         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6447         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6448         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6449         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6450         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6451         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6452 };
6453
6454 static const char *vml2_walker_mems[] = {
6455         "UTC_VML2_CACHE_PDE0_MEM0",
6456         "UTC_VML2_CACHE_PDE0_MEM1",
6457         "UTC_VML2_CACHE_PDE1_MEM0",
6458         "UTC_VML2_CACHE_PDE1_MEM1",
6459         "UTC_VML2_CACHE_PDE2_MEM0",
6460         "UTC_VML2_CACHE_PDE2_MEM1",
6461         "UTC_VML2_RDIF_LOG_FIFO",
6462 };
6463
6464 static const char *atc_l2_cache_2m_mems[] = {
6465         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6466         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6467         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6468         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6469 };
6470
6471 static const char *atc_l2_cache_4k_mems[] = {
6472         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6473         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6474         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6475         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6476         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6477         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6478         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6479         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6480         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6481         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6482         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6483         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6484         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6485         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6486         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6487         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6488         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6489         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6490         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6491         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6492         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6493         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6494         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6495         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6496         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6497         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6498         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6499         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6500         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6501         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6502         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6503         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6504 };
6505
6506 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6507                                          struct ras_err_data *err_data)
6508 {
6509         uint32_t i, data;
6510         uint32_t sec_count, ded_count;
6511
6512         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6513         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6514         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6515         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6516         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6517         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6518         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6519         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6520
6521         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6522                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6523                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6524
6525                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6526                 if (sec_count) {
6527                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6528                                 "SEC %d\n", i, vml2_mems[i], sec_count);
6529                         err_data->ce_count += sec_count;
6530                 }
6531
6532                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6533                 if (ded_count) {
6534                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6535                                 "DED %d\n", i, vml2_mems[i], ded_count);
6536                         err_data->ue_count += ded_count;
6537                 }
6538         }
6539
6540         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6541                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6542                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6543
6544                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6545                                                 SEC_COUNT);
6546                 if (sec_count) {
6547                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6548                                 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6549                         err_data->ce_count += sec_count;
6550                 }
6551
6552                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6553                                                 DED_COUNT);
6554                 if (ded_count) {
6555                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6556                                 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6557                         err_data->ue_count += ded_count;
6558                 }
6559         }
6560
6561         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6562                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6563                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6564
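                /* SEC count is in bits 14:13 of this EDC_CNT register:
                 * mask 0x00006000, shift 0xd (13).
                 */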
6565                 sec_count = (data & 0x00006000L) >> 0xd;
6566                 if (sec_count) {
6567                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6568                                 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6569                                 sec_count);
6570                         err_data->ce_count += sec_count;
6571                 }
6572         }
6573
6574         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6575                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6576                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6577
6578                 sec_count = (data & 0x00006000L) >> 0xd;
6579                 if (sec_count) {
6580                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6581                                 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6582                                 sec_count);
6583                         err_data->ce_count += sec_count;
6584                 }
6585
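                /* DED count is in bits 16:15: mask 0x00018000, shift 0xf (15). */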
6586                 ded_count = (data & 0x00018000L) >> 0xf;
6587                 if (ded_count) {
6588                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6589                                 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6590                                 ded_count);
6591                         err_data->ue_count += ded_count;
6592                 }
6593         }
6594
6595         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6596         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6597         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6598         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6599
6600         return 0;
6601 }
6602
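/*
 * The four loops above share one select-then-read pattern: write the memory
 * instance to an *_INDEX register, read the paired *_CNT register back and
 * pick the SEC/DED fields out of the value.  A minimal sketch of that
 * pattern, with a hypothetical helper name (not called anywhere in this
 * file):
 */
static inline uint32_t gfx_v9_0_read_indexed_edc_cnt(struct amdgpu_device *adev,
						     uint32_t index_reg,
						     uint32_t cnt_reg,
						     uint32_t instance)
{
	WREG32(index_reg, instance);	/* select the memory instance */
	return RREG32(cnt_reg);		/* read its EDC counter back */
}
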
6603 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6604         const struct soc15_reg_entry *reg,
6605         uint32_t se_id, uint32_t inst_id, uint32_t value,
6606         uint32_t *sec_count, uint32_t *ded_count)
6607 {
6608         uint32_t i;
6609         uint32_t sec_cnt, ded_cnt;
6610
6611         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6612                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6613                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6614                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6615                         continue;
6616
6617                 sec_cnt = (value &
6618                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6619                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6620                 if (sec_cnt) {
6621                         dev_info(adev->dev, "GFX SubBlock %s, "
6622                                 "Instance[%d][%d], SEC %d\n",
6623                                 gfx_v9_0_ras_fields[i].name,
6624                                 se_id, inst_id,
6625                                 sec_cnt);
6626                         *sec_count += sec_cnt;
6627                 }
6628
6629                 ded_cnt = (value &
6630                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6631                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6632                 if (ded_cnt) {
6633                         dev_info(adev->dev, "GFX SubBlock %s, "
6634                                 "Instance[%d][%d], DED %d\n",
6635                                 gfx_v9_0_ras_fields[i].name,
6636                                 se_id, inst_id,
6637                                 ded_cnt);
6638                         *ded_count += ded_cnt;
6639                 }
6640         }
6641
6642         return 0;
6643 }
6644
6645 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6646 {
6647         int i, j, k;
6648
6649         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6650                 return;
6651
6652         /* read back registers to clear the counters */
6653         mutex_lock(&adev->grbm_idx_mutex);
6654         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6655                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6656                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6657                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6658                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6659                         }
6660                 }
6661         }
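        /* 0xe0000000 sets the SE/SH/INSTANCE broadcast bits, restoring
         * GRBM_GFX_INDEX to its default "select everything" state.
         */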
6662         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6663         mutex_unlock(&adev->grbm_idx_mutex);
6664
6665         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6666         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6667         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6668         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6669         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6670         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6671         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6672         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6673
6674         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6675                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6676                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6677         }
6678
6679         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6680                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6681                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6682         }
6683
6684         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6685                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6686                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6687         }
6688
6689         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6690                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6691                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6692         }
6693
6694         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6695         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6696         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6697         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6698 }
6699
6700 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6701                                           void *ras_error_status)
6702 {
6703         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6704         uint32_t sec_count = 0, ded_count = 0;
6705         uint32_t i, j, k;
6706         uint32_t reg_value;
6707
6708         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6709                 return -EINVAL;
6710
6711         err_data->ue_count = 0;
6712         err_data->ce_count = 0;
6713
6714         mutex_lock(&adev->grbm_idx_mutex);
6715
6716         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6717                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6718                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6719                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6720                                 reg_value =
6721                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6722                                 if (reg_value)
6723                                         gfx_v9_0_ras_error_count(adev,
6724                                                 &gfx_v9_0_edc_counter_regs[i],
6725                                                 j, k, reg_value,
6726                                                 &sec_count, &ded_count);
6727                         }
6728                 }
6729         }
6730
6731         err_data->ce_count += sec_count;
6732         err_data->ue_count += ded_count;
6733
6734         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6735         mutex_unlock(&adev->grbm_idx_mutex);
6736
6737         gfx_v9_0_query_utc_edc_status(adev, err_data);
6738
6739         return 0;
6740 }
6741
6742 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6743 {
6744         const unsigned int cp_coher_cntl =
6745                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6746                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6747                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6748                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6749                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6750
6751         /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
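        /* COHER_SIZE = ~0 with COHER_BASE = 0 below spans the entire address
         * range, making this a full writeback/invalidate of the TC, TCL1 and
         * SH I$/K$ caches rather than a per-surface operation.
         */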
6752         amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6753         amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6754         amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6755         amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6756         amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6757         amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6758         amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6759 }
6760
6761 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6762                                         uint32_t pipe, bool enable)
6763 {
6764         struct amdgpu_device *adev = ring->adev;
6765         uint32_t val;
6766         uint32_t wcl_cs_reg;
6767
6768         /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
6769         val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
6770
6771         switch (pipe) {
6772         case 0:
6773                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
6774                 break;
6775         case 1:
6776                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
6777                 break;
6778         case 2:
6779                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
6780                 break;
6781         case 3:
6782                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
6783                 break;
6784         default:
6785                 DRM_DEBUG("invalid pipe %d\n", pipe);
6786                 return;
6787         }
6788
6789         amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6790 }
6791
6792 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6793 {
6794         struct amdgpu_device *adev = ring->adev;
6795         uint32_t val;
6796         int i;
6797
6798         /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register that
6799          * limits the number of gfx waves. Writing 0x1f (the low 5 bits set)
6800          * makes sure gfx only gets around 25% of the GPU's resources.
6801          */
6803         val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6804         amdgpu_ring_emit_wreg(ring,
6805                               SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
6806                               val);
6807
6808         /* Restrict waves for normal/low priority compute queues as well,
6809          * to get the best QoS for high priority compute jobs.
6810          *
6811          * amdgpu controls only the first ME (CS pipes 0-3).
6812          */
6813         for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6814                 if (i != ring->pipe)
6815                         gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
6816         }
6818 }
6819
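/*
 * gfx_v9_0_emit_wave_limit() is hooked up as .emit_wave_limit in the compute
 * ring funcs below; the IB scheduling code brackets high-priority
 * submissions with an enable/disable pair, so normal-priority queues are
 * throttled only while a high-priority job is actually on the ring.
 */
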
6820 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6821         .name = "gfx_v9_0",
6822         .early_init = gfx_v9_0_early_init,
6823         .late_init = gfx_v9_0_late_init,
6824         .sw_init = gfx_v9_0_sw_init,
6825         .sw_fini = gfx_v9_0_sw_fini,
6826         .hw_init = gfx_v9_0_hw_init,
6827         .hw_fini = gfx_v9_0_hw_fini,
6828         .suspend = gfx_v9_0_suspend,
6829         .resume = gfx_v9_0_resume,
6830         .is_idle = gfx_v9_0_is_idle,
6831         .wait_for_idle = gfx_v9_0_wait_for_idle,
6832         .soft_reset = gfx_v9_0_soft_reset,
6833         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6834         .set_powergating_state = gfx_v9_0_set_powergating_state,
6835         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6836 };
6837
6838 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6839         .type = AMDGPU_RING_TYPE_GFX,
6840         .align_mask = 0xff,
6841         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6842         .support_64bit_ptrs = true,
6843         .vmhub = AMDGPU_GFXHUB_0,
6844         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6845         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6846         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6847         .emit_frame_size = /* at most 242 dwords in total, if 16 IBs */
6848                 5 +  /* COND_EXEC */
6849                 7 +  /* PIPELINE_SYNC */
6850                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6851                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6852                 2 + /* VM_FLUSH */
6853                 8 +  /* FENCE for VM_FLUSH */
6854                 20 + /* GDS switch */
6855                 4 + /* double SWITCH_BUFFER,
6856                        the first COND_EXEC jumps to the place just
6857                        prior to this double SWITCH_BUFFER */
6858                 5 + /* COND_EXEC */
6859                 7 +  /* HDP_flush */
6860                 4 +  /* VGT_flush */
6861                 14 + /* CE_META */
6862                 31 + /* DE_META */
6863                 3 + /* CNTX_CTRL */
6864                 5 + /* HDP_INVL */
6865                 8 + 8 + /* FENCE x2 */
6866                 2 + /* SWITCH_BUFFER */
6867                 7, /* gfx_v9_0_emit_mem_sync */
6868         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6869         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6870         .emit_fence = gfx_v9_0_ring_emit_fence,
6871         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6872         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6873         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6874         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6875         .test_ring = gfx_v9_0_ring_test_ring,
6876         .test_ib = gfx_v9_0_ring_test_ib,
6877         .insert_nop = amdgpu_ring_insert_nop,
6878         .pad_ib = amdgpu_ring_generic_pad_ib,
6879         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6880         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6881         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6882         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6883         .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6884         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6885         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6886         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6887         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6888         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6889 };
6890
6891 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6892         .type = AMDGPU_RING_TYPE_COMPUTE,
6893         .align_mask = 0xff,
6894         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6895         .support_64bit_ptrs = true,
6896         .vmhub = AMDGPU_GFXHUB_0,
6897         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6898         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6899         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6900         .emit_frame_size =
6901                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6902                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6903                 5 + /* hdp invalidate */
6904                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6905                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6906                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6907                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6908                 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6909                 7 + /* gfx_v9_0_emit_mem_sync */
6910                 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
6911                 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
6912         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6913         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6914         .emit_fence = gfx_v9_0_ring_emit_fence,
6915         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6916         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6917         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6918         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6919         .test_ring = gfx_v9_0_ring_test_ring,
6920         .test_ib = gfx_v9_0_ring_test_ib,
6921         .insert_nop = amdgpu_ring_insert_nop,
6922         .pad_ib = amdgpu_ring_generic_pad_ib,
6923         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6924         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6925         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6926         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6927         .emit_wave_limit = gfx_v9_0_emit_wave_limit,
6928 };
6929
6930 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6931         .type = AMDGPU_RING_TYPE_KIQ,
6932         .align_mask = 0xff,
6933         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6934         .support_64bit_ptrs = true,
6935         .vmhub = AMDGPU_GFXHUB_0,
6936         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6937         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6938         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6939         .emit_frame_size =
6940                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6941                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6942                 5 + /* hdp invalidate */
6943                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6944                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6945                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6946                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6947                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6948         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6949         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6950         .test_ring = gfx_v9_0_ring_test_ring,
6951         .insert_nop = amdgpu_ring_insert_nop,
6952         .pad_ib = amdgpu_ring_generic_pad_ib,
6953         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6954         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6955         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6956         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6957 };
6958
6959 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6960 {
6961         int i;
6962
6963         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6964
6965         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6966                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6967
6968         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6969                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6970 }
6971
6972 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6973         .set = gfx_v9_0_set_eop_interrupt_state,
6974         .process = gfx_v9_0_eop_irq,
6975 };
6976
6977 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6978         .set = gfx_v9_0_set_priv_reg_fault_state,
6979         .process = gfx_v9_0_priv_reg_irq,
6980 };
6981
6982 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6983         .set = gfx_v9_0_set_priv_inst_fault_state,
6984         .process = gfx_v9_0_priv_inst_irq,
6985 };
6986
6987 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6988         .set = gfx_v9_0_set_cp_ecc_error_state,
6989         .process = amdgpu_gfx_cp_ecc_error_irq,
6990 };
6991
6993 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6994 {
6995         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6996         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6997
6998         adev->gfx.priv_reg_irq.num_types = 1;
6999         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
7000
7001         adev->gfx.priv_inst_irq.num_types = 1;
7002         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
7003
7004         adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
7005         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
7006 }
7007
7008 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
7009 {
7010         switch (adev->asic_type) {
7011         case CHIP_VEGA10:
7012         case CHIP_VEGA12:
7013         case CHIP_VEGA20:
7014         case CHIP_RAVEN:
7015         case CHIP_ARCTURUS:
7016         case CHIP_RENOIR:
7017         case CHIP_ALDEBARAN:
7018                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7019                 break;
7020         default:
7021                 break;
7022         }
7023 }
7024
7025 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7026 {
7027         /* init asic gds info */
7028         switch (adev->asic_type) {
7029         case CHIP_VEGA10:
7030         case CHIP_VEGA12:
7031         case CHIP_VEGA20:
7032                 adev->gds.gds_size = 0x10000;
7033                 break;
7034         case CHIP_RAVEN:
7035         case CHIP_ARCTURUS:
7036                 adev->gds.gds_size = 0x1000;
7037                 break;
7038         case CHIP_ALDEBARAN:
7039                 /* Aldebaran removed all of the GDS internal memory;
7040                  * only GWS opcodes (e.g. barrier, semaphore) are
7041                  * supported in the kernel. */
7042                 adev->gds.gds_size = 0;
7043                 break;
7044         default:
7045                 adev->gds.gds_size = 0x10000;
7046                 break;
7047         }
7048
7049         switch (adev->asic_type) {
7050         case CHIP_VEGA10:
7051         case CHIP_VEGA20:
7052                 adev->gds.gds_compute_max_wave_id = 0x7ff;
7053                 break;
7054         case CHIP_VEGA12:
7055                 adev->gds.gds_compute_max_wave_id = 0x27f;
7056                 break;
7057         case CHIP_RAVEN:
7058                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7059                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7060                 else
7061                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7062                 break;
7063         case CHIP_ARCTURUS:
7064                 adev->gds.gds_compute_max_wave_id = 0xfff;
7065                 break;
7066         case CHIP_ALDEBARAN:
7067                 /* deprecated for Aldebaran, no usage at all */
7068                 adev->gds.gds_compute_max_wave_id = 0;
7069                 break;
7070         default:
7071                 /* this really depends on the chip */
7072                 adev->gds.gds_compute_max_wave_id = 0x7ff;
7073                 break;
7074         }
7075
7076         adev->gds.gws_size = 64;
7077         adev->gds.oa_size = 16;
7078 }
7079
7080 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7081                                                  u32 bitmap)
7082 {
7083         u32 data;
7084
7085         if (!bitmap)
7086                 return;
7087
7088         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7089         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7090
7091         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7092 }
7093
7094 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7095 {
7096         u32 data, mask;
7097
7098         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7099         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7100
7101         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7102         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7103
7104         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7105
7106         return (~data) & mask;
7107 }
7108
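/*
 * Worked example for gfx_v9_0_get_cu_active_bitmap() (hypothetical numbers):
 * with max_cu_per_sh = 14, amdgpu_gfx_create_bitmask() yields 0x3fff; if the
 * combined inactive-CU field reads 0x3000, the result is
 * (~0x3000) & 0x3fff = 0x0fff, i.e. CUs 0-11 active and CUs 12-13 fused off
 * or disabled.
 */
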
7109 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7110                                  struct amdgpu_cu_info *cu_info)
7111 {
7112         int i, j, k, counter, active_cu_number = 0;
7113         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7114         unsigned disable_masks[4 * 4];
7115
7116         if (!adev || !cu_info)
7117                 return -EINVAL;
7118
7119         /*
7120          * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
7121          */
7122         if (adev->gfx.config.max_shader_engines *
7123                 adev->gfx.config.max_sh_per_se > 16)
7124                 return -EINVAL;
7125
7126         amdgpu_gfx_parse_disable_cu(disable_masks,
7127                                     adev->gfx.config.max_shader_engines,
7128                                     adev->gfx.config.max_sh_per_se);
7129
7130         mutex_lock(&adev->grbm_idx_mutex);
7131         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7132                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7133                         mask = 1;
7134                         ao_bitmap = 0;
7135                         counter = 0;
7136                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
7137                         gfx_v9_0_set_user_cu_inactive_bitmap(
7138                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7139                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7140
7141                         /*
7142                          * The bitmap (and ao_cu_bitmap) in the cu_info
7143                          * structure is a 4x4 array, which suits Vega ASICs
7144                          * with their 4*2 SE/SH layout.
7145                          * For Arcturus, however, the SE/SH layout changed
7146                          * to 8*1. To minimize the impact, we map it onto
7147                          * the current bitmap array as below:
7148                          *    SE4,SH0 --> bitmap[0][1]
7149                          *    SE5,SH0 --> bitmap[1][1]
7150                          *    SE6,SH0 --> bitmap[2][1]
7151                          *    SE7,SH0 --> bitmap[3][1]
7152                          */
7153                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
7154
7155                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7156                                 if (bitmap & mask) {
7157                                         if (counter < adev->gfx.config.max_cu_per_sh)
7158                                                 ao_bitmap |= mask;
7159                                         counter++;
7160                                 }
7161                                 mask <<= 1;
7162                         }
7163                         active_cu_number += counter;
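                        /* Pack the AO bitmaps of the first four SE/SH pairs
                         * into one 32-bit mask: SE0/SH0 -> bits 0-7,
                         * SE0/SH1 -> bits 8-15, SE1/SH0 -> bits 16-23,
                         * SE1/SH1 -> bits 24-31.
                         */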
7164                         if (i < 2 && j < 2)
7165                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7166                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7167                 }
7168         }
7169         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7170         mutex_unlock(&adev->grbm_idx_mutex);
7171
7172         cu_info->number = active_cu_number;
7173         cu_info->ao_cu_mask = ao_cu_mask;
7174         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7175
7176         return 0;
7177 }
7178
7179 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7180 {
7181         .type = AMD_IP_BLOCK_TYPE_GFX,
7182         .major = 9,
7183         .minor = 0,
7184         .rev = 0,
7185         .funcs = &gfx_v9_0_ip_funcs,
7186 };