/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"
#include "gfx_v9_0.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

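/*
 * Firmware images used by this IP block. MODULE_FIRMWARE() records each
 * file as a dependency of the module so userspace tooling (e.g. initramfs
 * generators) knows to bundle it with the driver.
 */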
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_1_ARCT                 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_2_ARCT                 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_3_ARCT                 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_4_ARCT                 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX        0
#define mmTCP_CHAN_STEER_5_ARCT                 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX        0

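/*
 * GFX sub-block indices as used by the RAS trusted application (TA).
 * The *_INDEX_START/*_INDEX_END entries bracket each block's sub-range so
 * callers can iterate a whole block; the absolute values are part of the
 * TA interface and presumably must stay in sync with the TA firmware.
 */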
enum ta_ras_gfx_subblock {
        /* CPC */
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF */
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG */
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS */
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI */
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ */
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges) */
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0 */
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1 */
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2 */
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA */
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA */
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges) */
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0 */
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1 */
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2 */
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3 */
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4 */
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI */
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP */
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD */
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges) */
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0 */
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1 */
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2 */
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank */
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker */
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

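/*
 * Build one ras_gfx_subblocks[] entry, indexed by the driver-side
 * AMDGPU_RAS_BLOCK__* id and mapped to the matching TA_RAS_BLOCK__* id.
 * Flags a-d pack into hw_supported_error_type as bits 0-3, and e-h into
 * sw_supported_error_type in the order g (bit 0), e (bit 1), h (bit 2),
 * f (bit 3); each bit marks one supported error type for the sub-block.
 */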
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

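/*
 * "Golden" register settings: (mask, value) pairs layered over the hardware
 * defaults at init time. gfx_v9_0_init_golden_registers() picks the
 * per-ASIC tables below and, for most ASICs, follows them with the common
 * gc_9_x table.
 */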
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
        {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
        {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

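/*
 * RLC-assisted register write for SR-IOV: the guest cannot touch most GC
 * registers directly, so the value and target offset are staged in scratch
 * registers, RLC_SPARE_INT signals the RLC firmware to perform the write,
 * and bit 31 of scratch_reg1 is polled until the firmware acknowledges it.
 * GRBM_GFX_CNTL/GRBM_GFX_INDEX are mirrored to scratch_reg2/3 and written
 * directly as well.
 */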
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
        static void *scratch_reg0;
        static void *scratch_reg1;
        static void *scratch_reg2;
        static void *scratch_reg3;
        static void *spare_int;
        static uint32_t grbm_cntl;
        static uint32_t grbm_idx;

        scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
        scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
        scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
        scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
        spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

        if (amdgpu_sriov_runtime(adev)) {
                pr_err("RLCG register write must not be called during SR-IOV runtime\n");
                return;
        }

        if (offset == grbm_cntl || offset == grbm_idx) {
                if (offset == grbm_cntl)
                        writel(v, scratch_reg2);
                else if (offset == grbm_idx)
                        writel(v, scratch_reg3);

                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                uint32_t i = 0;
                uint32_t retries = 50000;

                writel(v, scratch_reg0);
                writel(offset | 0x80000000, scratch_reg1);
                writel(1, spare_int);
                for (i = 0; i < retries; i++) {
                        u32 tmp;

                        tmp = readl(scratch_reg1);
                        if (!(tmp & 0x80000000))
                                break;

                        udelay(10);
                }
                if (i >= retries)
                        pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
        }
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

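/*
 * KIQ (kernel interface queue) PM4 packet builders. The KIQ is a privileged
 * compute queue through which the driver asks the CP firmware to map, unmap
 * and query the other queues.
 */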
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring,
                PACKET3_SET_RESOURCES_VMID_MASK(0) |
                /* vmid_mask: 0, queue_type: 0 (KIQ) */
                PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
        amdgpu_ring_write(kiq_ring,
                        lower_32_bits(queue_mask));     /* queue mask lo */
        amdgpu_ring_write(kiq_ring,
                        upper_32_bits(queue_mask));     /* queue mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base: 0, gds heap size: 0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
        amdgpu_ring_write(kiq_ring,
                         PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                         PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                         PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                         PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                         PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                         /* queue_type: normal compute queue */
                         PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                         /* alloc format: all_on_one_pipe */
                         PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                         PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                         /* num_queues: must be 1 */
                         PACKET3_MAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, seq);
        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                        PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                uint16_t pasid, uint32_t flush_type,
                                bool all_hub)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

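/*
 * The *_size fields below are the dword footprint of each packet (header
 * included), used by the shared KIQ code to reserve ring space before
 * emitting it.
 */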
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
        .kiq_set_resources = gfx_v9_0_kiq_set_resources,
        .kiq_map_queues = gfx_v9_0_kiq_map_queues,
        .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
        .kiq_query_status = gfx_v9_0_kiq_query_status,
        .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
        .query_status_size = 7,
        .invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
        adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_ARCTURUS:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_4_1_arct,
                                                ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        case CHIP_RENOIR:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rn,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rn));
                return; /* Renoir does not need the common golden settings */
        default:
                break;
        }

        if (adev->asic_type != CHIP_ARCTURUS)
                soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

993 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
994 {
995         adev->gfx.scratch.num_reg = 8;
996         adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
997         adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
998 }
999
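/*
 * Emit a PM4 WRITE_DATA packet writing @val to register @reg on the
 * selected engine, optionally with write confirmation (@wc).
 */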
1000 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1001                                        bool wc, uint32_t reg, uint32_t val)
1002 {
1003         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1004         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1005                                 WRITE_DATA_DST_SEL(0) |
1006                                 (wc ? WR_CONFIRM : 0));
1007         amdgpu_ring_write(ring, reg);
1008         amdgpu_ring_write(ring, 0);
1009         amdgpu_ring_write(ring, val);
1010 }
1011
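/*
 * Emit a PM4 WAIT_REG_MEM packet: poll a register or memory location
 * (@mem_space) until (value & @mask) == @ref, waiting @inv intervals
 * between polls. Memory addresses must be dword-aligned.
 */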
1012 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1013                                   int mem_space, int opt, uint32_t addr0,
1014                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1015                                   uint32_t inv)
1016 {
1017         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1018         amdgpu_ring_write(ring,
1019                                  /* memory (1) or register (0) */
1020                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1021                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1022                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1023                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1024
1025         if (mem_space)
1026                 BUG_ON(addr0 & 0x3); /* Dword align */
1027         amdgpu_ring_write(ring, addr0);
1028         amdgpu_ring_write(ring, addr1);
1029         amdgpu_ring_write(ring, ref);
1030         amdgpu_ring_write(ring, mask);
1031         amdgpu_ring_write(ring, inv); /* poll interval */
1032 }
1033
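/*
 * Sanity-test the ring: seed a scratch register with 0xCAFEDEAD, have
 * the CP rewrite it to 0xDEADBEEF through the ring, and poll for the
 * result until adev->usec_timeout expires.
 */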
1034 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1035 {
1036         struct amdgpu_device *adev = ring->adev;
1037         uint32_t scratch;
1038         uint32_t tmp = 0;
1039         unsigned i;
1040         int r;
1041
1042         r = amdgpu_gfx_scratch_get(adev, &scratch);
1043         if (r)
1044                 return r;
1045
1046         WREG32(scratch, 0xCAFEDEAD);
1047         r = amdgpu_ring_alloc(ring, 3);
1048         if (r)
1049                 goto error_free_scratch;
1050
1051         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1052         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1053         amdgpu_ring_write(ring, 0xDEADBEEF);
1054         amdgpu_ring_commit(ring);
1055
1056         for (i = 0; i < adev->usec_timeout; i++) {
1057                 tmp = RREG32(scratch);
1058                 if (tmp == 0xDEADBEEF)
1059                         break;
1060                 udelay(1);
1061         }
1062
1063         if (i >= adev->usec_timeout)
1064                 r = -ETIMEDOUT;
1065
1066 error_free_scratch:
1067         amdgpu_gfx_scratch_free(adev, scratch);
1068         return r;
1069 }
1070
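/*
 * Sanity-test indirect buffer execution: an IB writes 0xDEADBEEF to a
 * writeback slot seeded with 0xCAFEDEAD, and the result is checked
 * after the associated fence signals.
 */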
1071 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1072 {
1073         struct amdgpu_device *adev = ring->adev;
1074         struct amdgpu_ib ib;
1075         struct dma_fence *f = NULL;
1076
1077         unsigned index;
1078         uint64_t gpu_addr;
1079         uint32_t tmp;
1080         long r;
1081
1082         r = amdgpu_device_wb_get(adev, &index);
1083         if (r)
1084                 return r;
1085
1086         gpu_addr = adev->wb.gpu_addr + (index * 4);
1087         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1088         memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16,
                          AMDGPU_IB_POOL_DIRECT, &ib);
1091         if (r)
1092                 goto err1;
1093
1094         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1095         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1096         ib.ptr[2] = lower_32_bits(gpu_addr);
1097         ib.ptr[3] = upper_32_bits(gpu_addr);
1098         ib.ptr[4] = 0xDEADBEEF;
1099         ib.length_dw = 5;
1100
1101         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1102         if (r)
1103                 goto err2;
1104
1105         r = dma_fence_wait_timeout(f, false, timeout);
1106         if (r == 0) {
1107                 r = -ETIMEDOUT;
1108                 goto err2;
1109         } else if (r < 0) {
1110                 goto err2;
1111         }
1112
1113         tmp = adev->wb.wb[index];
1114         if (tmp == 0xDEADBEEF)
1115                 r = 0;
1116         else
1117                 r = -EINVAL;
1118
1119 err2:
1120         amdgpu_ib_free(adev, &ib, NULL);
1121         dma_fence_put(f);
1122 err1:
1123         amdgpu_device_wb_free(adev, index);
1124         return r;
1125 }
1126
1128 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1129 {
1130         release_firmware(adev->gfx.pfp_fw);
1131         adev->gfx.pfp_fw = NULL;
1132         release_firmware(adev->gfx.me_fw);
1133         adev->gfx.me_fw = NULL;
1134         release_firmware(adev->gfx.ce_fw);
1135         adev->gfx.ce_fw = NULL;
1136         release_firmware(adev->gfx.rlc_fw);
1137         adev->gfx.rlc_fw = NULL;
1138         release_firmware(adev->gfx.mec_fw);
1139         adev->gfx.mec_fw = NULL;
1140         release_firmware(adev->gfx.mec2_fw);
1141         adev->gfx.mec2_fw = NULL;
1142
1143         kfree(adev->gfx.rlc.register_list_format);
1144 }
1145
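/*
 * Parse the v2.1 RLC firmware header extensions: versions, sizes and
 * offsets of the save/restore list CNTL, GPM and SRM sections.
 */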
1146 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1147 {
1148         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1149
1150         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1151         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1152         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1153         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1154         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1155         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1156         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1157         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1158         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1159         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1160         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1161         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1162         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1163         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1164                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1165 }
1166
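/*
 * Record whether the loaded ME/MEC firmware is new enough to support
 * firmware-assisted register write-wait; the required minimum ucode
 * and feature versions differ per ASIC.
 */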
1167 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1168 {
1169         adev->gfx.me_fw_write_wait = false;
1170         adev->gfx.mec_fw_write_wait = false;
1171
1172         if ((adev->asic_type != CHIP_ARCTURUS) &&
1173             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1174             (adev->gfx.mec_feature_version < 46) ||
1175             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1176             (adev->gfx.pfp_feature_version < 46)))
1177                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1178
1179         switch (adev->asic_type) {
1180         case CHIP_VEGA10:
1181                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1182                     (adev->gfx.me_feature_version >= 42) &&
1183                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1184                     (adev->gfx.pfp_feature_version >= 42))
1185                         adev->gfx.me_fw_write_wait = true;
1186
1187                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1188                     (adev->gfx.mec_feature_version >= 42))
1189                         adev->gfx.mec_fw_write_wait = true;
1190                 break;
1191         case CHIP_VEGA12:
1192                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1193                     (adev->gfx.me_feature_version >= 44) &&
1194                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1195                     (adev->gfx.pfp_feature_version >= 44))
1196                         adev->gfx.me_fw_write_wait = true;
1197
1198                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1199                     (adev->gfx.mec_feature_version >= 44))
1200                         adev->gfx.mec_fw_write_wait = true;
1201                 break;
1202         case CHIP_VEGA20:
1203                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1204                     (adev->gfx.me_feature_version >= 44) &&
1205                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1206                     (adev->gfx.pfp_feature_version >= 44))
1207                         adev->gfx.me_fw_write_wait = true;
1208
1209                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1210                     (adev->gfx.mec_feature_version >= 44))
1211                         adev->gfx.mec_fw_write_wait = true;
1212                 break;
1213         case CHIP_RAVEN:
1214                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1215                     (adev->gfx.me_feature_version >= 42) &&
1216                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1217                     (adev->gfx.pfp_feature_version >= 42))
1218                         adev->gfx.me_fw_write_wait = true;
1219
1220                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1221                     (adev->gfx.mec_feature_version >= 42))
1222                         adev->gfx.mec_fw_write_wait = true;
1223                 break;
1224         default:
1225                 adev->gfx.me_fw_write_wait = true;
1226                 adev->gfx.mec_fw_write_wait = true;
1227                 break;
1228         }
1229 }
1230
1231 struct amdgpu_gfxoff_quirk {
1232         u16 chip_vendor;
1233         u16 chip_device;
1234         u16 subsys_vendor;
1235         u16 subsys_device;
1236         u8 revision;
1237 };
1238
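/*
 * Boards on which GFXOFF is known to be unstable, matched by PCI
 * vendor/device, subsystem IDs and revision.
 */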
1239 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1240         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1241         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1242         /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1243         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1244         /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1245         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1246         { 0, 0, 0, 0, 0 },
1247 };
1248
1249 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1250 {
1251         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1252
1253         while (p && p->chip_device != 0) {
1254                 if (pdev->vendor == p->chip_vendor &&
1255                     pdev->device == p->chip_device &&
1256                     pdev->subsystem_vendor == p->subsys_vendor &&
1257                     pdev->subsystem_device == p->subsys_device &&
1258                     pdev->revision == p->revision) {
1259                         return true;
1260                 }
1261                 ++p;
1262         }
1263         return false;
1264 }
1265
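/*
 * SMU firmware 0x41e2b appears to be the first "kicker" release for
 * Raven; the same threshold selects the kicker RLC image in
 * gfx_v9_0_init_rlc_microcode().
 */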
static bool is_raven_kicker(struct amdgpu_device *adev)
{
        return adev->pm.fw_version >= 0x41e2b;
}
1273
1274 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1275 {
1276         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1277                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1278
1279         switch (adev->asic_type) {
1280         case CHIP_VEGA10:
1281         case CHIP_VEGA12:
1282         case CHIP_VEGA20:
1283                 break;
1284         case CHIP_RAVEN:
1285                 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1286                       (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1287                     ((!is_raven_kicker(adev) &&
1288                       adev->gfx.rlc_fw_version < 531) ||
1289                      (adev->gfx.rlc_feature_version < 1) ||
1290                      !adev->gfx.rlc.is_rlc_v2_1))
1291                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1292
1293                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1294                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1295                                 AMD_PG_SUPPORT_CP |
1296                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1297                 break;
1298         case CHIP_RENOIR:
1299                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1300                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1301                                 AMD_PG_SUPPORT_CP |
1302                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1303                 break;
1304         default:
1305                 break;
1306         }
1307 }
1308
1309 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1310                                           const char *chip_name)
1311 {
1312         char fw_name[30];
1313         int err;
1314         struct amdgpu_firmware_info *info = NULL;
1315         const struct common_firmware_header *header = NULL;
1316         const struct gfx_firmware_header_v1_0 *cp_hdr;
1317
1318         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1319         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1320         if (err)
1321                 goto out;
1322         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1323         if (err)
1324                 goto out;
1325         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1326         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1327         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1328
1329         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1330         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1331         if (err)
1332                 goto out;
1333         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1334         if (err)
1335                 goto out;
1336         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1337         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1338         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1339
1340         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1341         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1342         if (err)
1343                 goto out;
1344         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1345         if (err)
1346                 goto out;
1347         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1348         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1349         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1350
1351         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1352                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1353                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1354                 info->fw = adev->gfx.pfp_fw;
1355                 header = (const struct common_firmware_header *)info->fw->data;
1356                 adev->firmware.fw_size +=
1357                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1358
1359                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1360                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1361                 info->fw = adev->gfx.me_fw;
1362                 header = (const struct common_firmware_header *)info->fw->data;
1363                 adev->firmware.fw_size +=
1364                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1365
1366                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1367                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1368                 info->fw = adev->gfx.ce_fw;
1369                 header = (const struct common_firmware_header *)info->fw->data;
1370                 adev->firmware.fw_size +=
1371                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1372         }
1373
1374 out:
1375         if (err) {
1376                 dev_err(adev->dev,
1377                         "gfx9: Failed to load firmware \"%s\"\n",
1378                         fw_name);
1379                 release_firmware(adev->gfx.pfp_fw);
1380                 adev->gfx.pfp_fw = NULL;
1381                 release_firmware(adev->gfx.me_fw);
1382                 adev->gfx.me_fw = NULL;
1383                 release_firmware(adev->gfx.ce_fw);
1384                 adev->gfx.ce_fw = NULL;
1385         }
1386         return err;
1387 }
1388
1389 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1390                                           const char *chip_name)
1391 {
1392         char fw_name[30];
1393         int err;
1394         struct amdgpu_firmware_info *info = NULL;
1395         const struct common_firmware_header *header = NULL;
1396         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1397         unsigned int *tmp = NULL;
1398         unsigned int i = 0;
1399         uint16_t version_major;
1400         uint16_t version_minor;
1401         uint32_t smu_version;
1402
        /*
         * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
         * instead of picasso_rlc.bin. The PCI revision distinguishes
         * the package:
         *   PCO AM4: revision 0xC8..0xCF or 0xD8..0xDF
         *   PCO FP5: everything else
         */
1411         if (!strcmp(chip_name, "picasso") &&
1412                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1413                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1414                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
        else if (!strcmp(chip_name, "raven") &&
                 (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
                 (smu_version >= 0x41e2b))
                /*
                 * The SMC is loaded by the SBIOS on APUs, so the SMU
                 * version can be queried directly.
                 */
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1421         else
1422                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1423         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1424         if (err)
1425                 goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1428
1429         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1430         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1431         if (version_major == 2 && version_minor == 1)
1432                 adev->gfx.rlc.is_rlc_v2_1 = true;
1433
1434         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1435         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1436         adev->gfx.rlc.save_and_restore_offset =
1437                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1438         adev->gfx.rlc.clear_state_descriptor_offset =
1439                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1440         adev->gfx.rlc.avail_scratch_ram_locations =
1441                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1442         adev->gfx.rlc.reg_restore_list_size =
1443                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1444         adev->gfx.rlc.reg_list_format_start =
1445                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1446         adev->gfx.rlc.reg_list_format_separate_start =
1447                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1448         adev->gfx.rlc.starting_offsets_start =
1449                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1450         adev->gfx.rlc.reg_list_format_size_bytes =
1451                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1452         adev->gfx.rlc.reg_list_size_bytes =
1453                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1454         adev->gfx.rlc.register_list_format =
1455                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1456                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1457         if (!adev->gfx.rlc.register_list_format) {
1458                 err = -ENOMEM;
1459                 goto out;
1460         }
1461
1462         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1463                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
        for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1465                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1466
1467         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1468
1469         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1470                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
        for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1472                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1473
1474         if (adev->gfx.rlc.is_rlc_v2_1)
1475                 gfx_v9_0_init_rlc_ext_microcode(adev);
1476
1477         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1478                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1479                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1480                 info->fw = adev->gfx.rlc_fw;
1481                 header = (const struct common_firmware_header *)info->fw->data;
1482                 adev->firmware.fw_size +=
1483                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1484
1485                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1486                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1487                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1488                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1489                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1490                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1491                         info->fw = adev->gfx.rlc_fw;
1492                         adev->firmware.fw_size +=
1493                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1494
1495                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1496                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1497                         info->fw = adev->gfx.rlc_fw;
1498                         adev->firmware.fw_size +=
1499                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1500
1501                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1502                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1503                         info->fw = adev->gfx.rlc_fw;
1504                         adev->firmware.fw_size +=
1505                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1506                 }
1507         }
1508
1509 out:
1510         if (err) {
1511                 dev_err(adev->dev,
1512                         "gfx9: Failed to load firmware \"%s\"\n",
1513                         fw_name);
1514                 release_firmware(adev->gfx.rlc_fw);
1515                 adev->gfx.rlc_fw = NULL;
1516         }
1517         return err;
1518 }
1519
1520 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1521                                           const char *chip_name)
1522 {
1523         char fw_name[30];
1524         int err;
1525         struct amdgpu_firmware_info *info = NULL;
1526         const struct common_firmware_header *header = NULL;
1527         const struct gfx_firmware_header_v1_0 *cp_hdr;
1528
1529         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1530         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1531         if (err)
1532                 goto out;
1533         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1534         if (err)
1535                 goto out;
1536         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1537         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1538         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1539
1541         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1542         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1543         if (!err) {
1544                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1545                 if (err)
1546                         goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                         adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                         le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                         le32_to_cpu(cp_hdr->ucode_feature_version);
1553         } else {
1554                 err = 0;
1555                 adev->gfx.mec2_fw = NULL;
1556         }
1557
1558         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1559                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1560                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1561                 info->fw = adev->gfx.mec_fw;
1562                 header = (const struct common_firmware_header *)info->fw->data;
1563                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1564                 adev->firmware.fw_size +=
1565                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1566
1567                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1568                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1569                 info->fw = adev->gfx.mec_fw;
1570                 adev->firmware.fw_size +=
1571                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1572
1573                 if (adev->gfx.mec2_fw) {
1574                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1575                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1576                         info->fw = adev->gfx.mec2_fw;
1577                         header = (const struct common_firmware_header *)info->fw->data;
1578                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1579                         adev->firmware.fw_size +=
1580                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1581
                        /*
                         * TODO: Determine if MEC2 JT FW loading can be
                         * removed for all GFX V9 ASICs and above.
                         */
1584                         if (adev->asic_type != CHIP_ARCTURUS &&
1585                             adev->asic_type != CHIP_RENOIR) {
1586                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1587                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1588                                 info->fw = adev->gfx.mec2_fw;
1589                                 adev->firmware.fw_size +=
1590                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1591                                         PAGE_SIZE);
1592                         }
1593                 }
1594         }
1595
1596 out:
1597         gfx_v9_0_check_if_need_gfxoff(adev);
1598         gfx_v9_0_check_fw_write_wait(adev);
1599         if (err) {
1600                 dev_err(adev->dev,
1601                         "gfx9: Failed to load firmware \"%s\"\n",
1602                         fw_name);
1603                 release_firmware(adev->gfx.mec_fw);
1604                 adev->gfx.mec_fw = NULL;
1605                 release_firmware(adev->gfx.mec2_fw);
1606                 adev->gfx.mec2_fw = NULL;
1607         }
1608         return err;
1609 }
1610
1611 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1612 {
1613         const char *chip_name;
1614         int r;
1615
1616         DRM_DEBUG("\n");
1617
1618         switch (adev->asic_type) {
1619         case CHIP_VEGA10:
1620                 chip_name = "vega10";
1621                 break;
1622         case CHIP_VEGA12:
1623                 chip_name = "vega12";
1624                 break;
1625         case CHIP_VEGA20:
1626                 chip_name = "vega20";
1627                 break;
1628         case CHIP_RAVEN:
1629                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1630                         chip_name = "raven2";
1631                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1632                         chip_name = "picasso";
1633                 else
1634                         chip_name = "raven";
1635                 break;
1636         case CHIP_ARCTURUS:
1637                 chip_name = "arcturus";
1638                 break;
1639         case CHIP_RENOIR:
1640                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1641                         chip_name = "renoir";
1642                 else
1643                         chip_name = "green_sardine";
1644                 break;
1645         default:
1646                 BUG();
1647         }
1648
1649         /* No CPG in Arcturus */
1650         if (adev->gfx.num_gfx_rings) {
1651                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1652                 if (r)
1653                         return r;
1654         }
1655
1656         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1657         if (r)
1658                 return r;
1659
        r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
        if (r)
                return r;

        return 0;
1665 }
1666
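/*
 * Compute the clear-state buffer size in dwords: begin-clear-state (2)
 * and context-control (3) preambles, one SET_CONTEXT_REG header pair
 * plus payload per extent, then end-clear-state (2) and CLEAR_STATE (2).
 */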
1667 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1668 {
1669         u32 count = 0;
1670         const struct cs_section_def *sect = NULL;
1671         const struct cs_extent_def *ext = NULL;
1672
1673         /* begin clear state */
1674         count += 2;
1675         /* context control state */
1676         count += 3;
1677
1678         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1679                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1680                         if (sect->id == SECT_CONTEXT)
1681                                 count += 2 + ext->reg_count;
1682                         else
1683                                 return 0;
1684                 }
1685         }
1686
1687         /* end clear state */
1688         count += 2;
1689         /* clear state */
1690         count += 2;
1691
1692         return count;
1693 }
1694
1695 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1696                                     volatile u32 *buffer)
1697 {
1698         u32 count = 0, i;
1699         const struct cs_section_def *sect = NULL;
1700         const struct cs_extent_def *ext = NULL;
1701
1702         if (adev->gfx.rlc.cs_data == NULL)
1703                 return;
1704         if (buffer == NULL)
1705                 return;
1706
1707         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1708         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1709
1710         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1711         buffer[count++] = cpu_to_le32(0x80000000);
1712         buffer[count++] = cpu_to_le32(0x80000000);
1713
1714         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1715                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1716                         if (sect->id == SECT_CONTEXT) {
1717                                 buffer[count++] =
1718                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1719                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1720                                                 PACKET3_SET_CONTEXT_REG_START);
1721                                 for (i = 0; i < ext->reg_count; i++)
1722                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1723                         } else {
1724                                 return;
1725                         }
1726                 }
1727         }
1728
1729         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1730         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1731
1732         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1733         buffer[count++] = cpu_to_le32(0);
1734 }
1735
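/*
 * Build per-SE/SH bitmaps of always-on CUs (4 on APUs, 8 on Vega12,
 * 12 otherwise) and program them into the RLC; the first two CUs are
 * also written to the powergating always-on mask.
 */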
1736 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1737 {
1738         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1739         uint32_t pg_always_on_cu_num = 2;
1740         uint32_t always_on_cu_num;
1741         uint32_t i, j, k;
1742         uint32_t mask, cu_bitmap, counter;
1743
1744         if (adev->flags & AMD_IS_APU)
1745                 always_on_cu_num = 4;
1746         else if (adev->asic_type == CHIP_VEGA12)
1747                 always_on_cu_num = 8;
1748         else
1749                 always_on_cu_num = 12;
1750
1751         mutex_lock(&adev->grbm_idx_mutex);
1752         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1753                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1754                         mask = 1;
1755                         cu_bitmap = 0;
1756                         counter = 0;
1757                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1758
                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1760                                 if (cu_info->bitmap[i][j] & mask) {
1761                                         if (counter == pg_always_on_cu_num)
1762                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1763                                         if (counter < always_on_cu_num)
1764                                                 cu_bitmap |= mask;
1765                                         else
1766                                                 break;
1767                                         counter++;
1768                                 }
1769                                 mask <<= 1;
1770                         }
1771
1772                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1773                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1774                 }
1775         }
1776         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1777         mutex_unlock(&adev->grbm_idx_mutex);
1778 }
1779
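/*
 * Program the RLC load-balancing (LBPW) thresholds and sampling
 * parameters for Raven; gfx_v9_4_init_lbpw() below is the Vega20
 * variant with different thresholds and counter limits.
 */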
1780 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1781 {
1782         uint32_t data;
1783
1784         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1785         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1786         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1787         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1788         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1789
1790         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1791         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1792
1793         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1794         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1795
1796         mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1798         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1799         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1800
1801         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1802         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1803         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1804         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1805         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1806
1807         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1808         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1809         data &= 0x0000FFFF;
1810         data |= 0x00C00000;
1811         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1812
1813         /*
1814          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1815          * programmed in gfx_v9_0_init_always_on_cu_mask()
1816          */
1817
        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
         * but used here for RLC_LB_CNTL configuration */
1820         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1821         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1822         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1823         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1824         mutex_unlock(&adev->grbm_idx_mutex);
1825
1826         gfx_v9_0_init_always_on_cu_mask(adev);
1827 }
1828
1829 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1830 {
1831         uint32_t data;
1832
1833         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1834         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1835         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1836         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1837         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1838
1839         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1840         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1841
        /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1843         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1844
1845         mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1847         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1848         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1849
1850         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1851         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1852         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1853         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1854         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1855
1856         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1857         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1858         data &= 0x0000FFFF;
1859         data |= 0x00C00000;
1860         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1861
1862         /*
1863          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1864          * programmed in gfx_v9_0_init_always_on_cu_mask()
1865          */
1866
        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
         * but used here for RLC_LB_CNTL configuration */
1869         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1870         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1871         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1872         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1873         mutex_unlock(&adev->grbm_idx_mutex);
1874
1875         gfx_v9_0_init_always_on_cu_mask(adev);
1876 }
1877
1878 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1879 {
1880         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1881 }
1882
1883 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1884 {
1885         return 5;
1886 }
1887
1888 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1889 {
1890         const struct cs_section_def *cs_data;
1891         int r;
1892
1893         adev->gfx.rlc.cs_data = gfx9_cs_data;
1894
1895         cs_data = adev->gfx.rlc.cs_data;
1896
1897         if (cs_data) {
1898                 /* init clear state block */
1899                 r = amdgpu_gfx_rlc_init_csb(adev);
1900                 if (r)
1901                         return r;
1902         }
1903
1904         if (adev->flags & AMD_IS_APU) {
1905                 /* TODO: double check the cp_table_size for RV */
1906                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1907                 r = amdgpu_gfx_rlc_init_cpt(adev);
1908                 if (r)
1909                         return r;
1910         }
1911
1912         switch (adev->asic_type) {
1913         case CHIP_RAVEN:
1914                 gfx_v9_0_init_lbpw(adev);
1915                 break;
1916         case CHIP_VEGA20:
1917                 gfx_v9_4_init_lbpw(adev);
1918                 break;
1919         default:
1920                 break;
1921         }
1922
1923         /* init spm vmid with 0xf */
1924         if (adev->gfx.rlc.funcs->update_spm_vmid)
1925                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1926
1927         return 0;
1928 }
1929
1930 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1931 {
1932         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1933         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1934 }
1935
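/*
 * Allocate the EOP buffer (one GFX9_MEC_HPD_SIZE slot per compute
 * ring) in VRAM and stage the MEC microcode in a GTT buffer object.
 */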
1936 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1937 {
1938         int r;
1939         u32 *hpd;
1940         const __le32 *fw_data;
1941         unsigned fw_size;
1942         u32 *fw;
1943         size_t mec_hpd_size;
1944
1945         const struct gfx_firmware_header_v1_0 *mec_hdr;
1946
1947         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1948
1949         /* take ownership of the relevant compute queues */
1950         amdgpu_gfx_compute_queue_acquire(adev);
1951         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1952         if (mec_hpd_size) {
1953                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1954                                               AMDGPU_GEM_DOMAIN_VRAM,
1955                                               &adev->gfx.mec.hpd_eop_obj,
1956                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1957                                               (void **)&hpd);
1958                 if (r) {
                        dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1960                         gfx_v9_0_mec_fini(adev);
1961                         return r;
1962                 }
1963
1964                 memset(hpd, 0, mec_hpd_size);
1965
1966                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1967                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1968         }
1969
1970         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1971
1972         fw_data = (const __le32 *)
1973                 (adev->gfx.mec_fw->data +
1974                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1975         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1976
        r = amdgpu_bo_create_reserved(adev, fw_size,
1978                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1979                                       &adev->gfx.mec.mec_fw_obj,
1980                                       &adev->gfx.mec.mec_fw_gpu_addr,
1981                                       (void **)&fw);
1982         if (r) {
1983                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1984                 gfx_v9_0_mec_fini(adev);
1985                 return r;
1986         }
1987
1988         memcpy(fw, fw_data, fw_size);
1989
1990         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1991         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1992
1993         return 0;
1994 }
1995
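/*
 * Read one wave-state register through the SQ_IND_INDEX/SQ_IND_DATA
 * pair; wave_read_regs() below adds auto-increment for bulk SGPR/VGPR
 * reads.
 */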
1996 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1997 {
1998         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1999                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2000                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2001                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
2002                 (SQ_IND_INDEX__FORCE_READ_MASK));
2003         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2004 }
2005
2006 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2007                            uint32_t wave, uint32_t thread,
2008                            uint32_t regno, uint32_t num, uint32_t *out)
2009 {
2010         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2011                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2012                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2013                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2014                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2015                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2016                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2017         while (num--)
2018                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2019 }
2020
2021 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2022 {
2023         /* type 1 wave data */
2024         dst[(*no_fields)++] = 1;
2025         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2026         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2027         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2028         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2029         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2030         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2031         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2032         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2033         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2034         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2035         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2036         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2037         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2038         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2039 }
2040
2041 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2042                                      uint32_t wave, uint32_t start,
2043                                      uint32_t size, uint32_t *dst)
2044 {
2045         wave_read_regs(
2046                 adev, simd, wave, 0,
2047                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2048 }
2049
2050 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2051                                      uint32_t wave, uint32_t thread,
2052                                      uint32_t start, uint32_t size,
2053                                      uint32_t *dst)
2054 {
2055         wave_read_regs(
2056                 adev, simd, wave, thread,
2057                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2058 }
2059
2060 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2061                                   u32 me, u32 pipe, u32 q, u32 vm)
2062 {
2063         soc15_grbm_select(adev, me, pipe, q, vm);
2064 }
2065
2066 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2067         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2068         .select_se_sh = &gfx_v9_0_select_se_sh,
2069         .read_wave_data = &gfx_v9_0_read_wave_data,
2070         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2071         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2072         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2073         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2074         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2075         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2076 };
2077
2078 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2079         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2080         .select_se_sh = &gfx_v9_0_select_se_sh,
2081         .read_wave_data = &gfx_v9_0_read_wave_data,
2082         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2083         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2084         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2085         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2086         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2087         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2088         .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
2089 };
2090
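/*
 * Early per-ASIC gfx configuration: hardware context count, FIFO
 * sizes, and the GB_ADDR_CONFIG value, which is then decoded into
 * pipe/bank/RB/SE counts and the pipe interleave size.
 */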
2091 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2092 {
2093         u32 gb_addr_config;
2094         int err;
2095
2096         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2097
2098         switch (adev->asic_type) {
2099         case CHIP_VEGA10:
2100                 adev->gfx.config.max_hw_contexts = 8;
2101                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2102                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2103                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2104                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2105                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2106                 break;
2107         case CHIP_VEGA12:
2108                 adev->gfx.config.max_hw_contexts = 8;
2109                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2110                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2111                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2112                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2113                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2114                 DRM_INFO("fix gfx.config for vega12\n");
2115                 break;
2116         case CHIP_VEGA20:
2117                 adev->gfx.config.max_hw_contexts = 8;
2118                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2119                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2120                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2121                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2122                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2123                 gb_addr_config &= ~0xf3e777ff;
2124                 gb_addr_config |= 0x22014042;
2125                 /* check vbios table if gpu info is not available */
2126                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2127                 if (err)
2128                         return err;
2129                 break;
2130         case CHIP_RAVEN:
2131                 adev->gfx.config.max_hw_contexts = 8;
2132                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2133                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2134                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2135                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2136                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2137                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2138                 else
2139                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2140                 break;
2141         case CHIP_ARCTURUS:
2142                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2143                 adev->gfx.config.max_hw_contexts = 8;
2144                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2145                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2146                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2147                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2148                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2149                 gb_addr_config &= ~0xf3e777ff;
2150                 gb_addr_config |= 0x22014042;
2151                 break;
2152         case CHIP_RENOIR:
2153                 adev->gfx.config.max_hw_contexts = 8;
2154                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2155                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2156                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2157                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2158                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2159                 gb_addr_config &= ~0xf3e777ff;
2160                 gb_addr_config |= 0x22010042;
2161                 break;
2162         default:
2163                 BUG();
2164                 break;
2165         }
2166
2167         adev->gfx.config.gb_addr_config = gb_addr_config;
2168
2169         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2170                         REG_GET_FIELD(
2171                                         adev->gfx.config.gb_addr_config,
2172                                         GB_ADDR_CONFIG,
2173                                         NUM_PIPES);
2174
2175         adev->gfx.config.max_tile_pipes =
2176                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2177
2178         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2179                         REG_GET_FIELD(
2180                                         adev->gfx.config.gb_addr_config,
2181                                         GB_ADDR_CONFIG,
2182                                         NUM_BANKS);
2183         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2184                         REG_GET_FIELD(
2185                                         adev->gfx.config.gb_addr_config,
2186                                         GB_ADDR_CONFIG,
2187                                         MAX_COMPRESSED_FRAGS);
2188         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2189                         REG_GET_FIELD(
2190                                         adev->gfx.config.gb_addr_config,
2191                                         GB_ADDR_CONFIG,
2192                                         NUM_RB_PER_SE);
2193         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2194                         REG_GET_FIELD(
2195                                         adev->gfx.config.gb_addr_config,
2196                                         GB_ADDR_CONFIG,
2197                                         NUM_SHADER_ENGINES);
2198         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2199                         REG_GET_FIELD(
2200                                         adev->gfx.config.gb_addr_config,
2201                                         GB_ADDR_CONFIG,
2202                                         PIPE_INTERLEAVE_SIZE));
2203
2204         return 0;
2205 }
2206
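/*
 * Set up one compute ring: map (mec, pipe, queue) to a doorbell and
 * an EOP slot, derive the EOP interrupt line, and choose the hardware
 * queue priority.
 */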
2207 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2208                                       int mec, int pipe, int queue)
2209 {
2210         unsigned irq_type;
2211         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2212         unsigned int hw_prio;
2213
2216         /* mec0 is me1 */
2217         ring->me = mec + 1;
2218         ring->pipe = pipe;
2219         ring->queue = queue;
2220
2221         ring->ring_obj = NULL;
2222         ring->use_doorbell = true;
2223         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2224         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2225                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2226         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2227
2228         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2229                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2230                 + ring->pipe;
2231         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
2232                                                             ring->queue) ?
2233                         AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2234         /* type-2 packets are deprecated on MEC, use type-3 instead */
2235         return amdgpu_ring_init(adev, ring, 1024,
2236                                 &adev->gfx.eop_irq, irq_type, hw_prio);
2237 }
2238
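/* sw_init: allocate the software state for the gfx block - IRQ sources,
 * microcode, RLC/MEC buffer objects, the gfx and compute rings, the KIQ
 * and the per-queue MQDs.
 */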
2239 static int gfx_v9_0_sw_init(void *handle)
2240 {
2241         int i, j, k, r, ring_id;
2242         struct amdgpu_ring *ring;
2243         struct amdgpu_kiq *kiq;
2244         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2245
2246         switch (adev->asic_type) {
2247         case CHIP_VEGA10:
2248         case CHIP_VEGA12:
2249         case CHIP_VEGA20:
2250         case CHIP_RAVEN:
2251         case CHIP_ARCTURUS:
2252         case CHIP_RENOIR:
2253                 adev->gfx.mec.num_mec = 2;
2254                 break;
2255         default:
2256                 adev->gfx.mec.num_mec = 1;
2257                 break;
2258         }
2259
2260         adev->gfx.mec.num_pipe_per_mec = 4;
2261         adev->gfx.mec.num_queue_per_pipe = 8;
2262
2263         /* EOP Event */
2264         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2265         if (r)
2266                 return r;
2267
2268         /* Privileged reg */
2269         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2270                               &adev->gfx.priv_reg_irq);
2271         if (r)
2272                 return r;
2273
2274         /* Privileged inst */
2275         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2276                               &adev->gfx.priv_inst_irq);
2277         if (r)
2278                 return r;
2279
2280         /* ECC error */
2281         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2282                               &adev->gfx.cp_ecc_error_irq);
2283         if (r)
2284                 return r;
2285
2286         /* FUE error */
2287         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2288                               &adev->gfx.cp_ecc_error_irq);
2289         if (r)
2290                 return r;
2291
2292         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2293
2294         gfx_v9_0_scratch_init(adev);
2295
2296         r = gfx_v9_0_init_microcode(adev);
2297         if (r) {
2298                 DRM_ERROR("Failed to load gfx firmware!\n");
2299                 return r;
2300         }
2301
2302         r = adev->gfx.rlc.funcs->init(adev);
2303         if (r) {
2304                 DRM_ERROR("Failed to init rlc BOs!\n");
2305                 return r;
2306         }
2307
2308         r = gfx_v9_0_mec_init(adev);
2309         if (r) {
2310                 DRM_ERROR("Failed to init MEC BOs!\n");
2311                 return r;
2312         }
2313
2314         /* set up the gfx ring */
2315         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2316                 ring = &adev->gfx.gfx_ring[i];
2317                 ring->ring_obj = NULL;
2318                 if (!i)
2319                         sprintf(ring->name, "gfx");
2320                 else
2321                         sprintf(ring->name, "gfx_%d", i);
2322                 ring->use_doorbell = true;
2323                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2324                 r = amdgpu_ring_init(adev, ring, 1024,
2325                                      &adev->gfx.eop_irq,
2326                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2327                                      AMDGPU_RING_PRIO_DEFAULT);
2328                 if (r)
2329                         return r;
2330         }
2331
2332         /* set up the compute queues - allocate horizontally across pipes */
2333         ring_id = 0;
2334         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2335                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2336                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2337                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2338                                         continue;
2339
2340                                 r = gfx_v9_0_compute_ring_init(adev,
2341                                                                ring_id,
2342                                                                i, k, j);
2343                                 if (r)
2344                                         return r;
2345
2346                                 ring_id++;
2347                         }
2348                 }
2349         }
2350
2351         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2352         if (r) {
2353                 DRM_ERROR("Failed to init KIQ BOs!\n");
2354                 return r;
2355         }
2356
2357         kiq = &adev->gfx.kiq;
2358         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2359         if (r)
2360                 return r;
2361
2362         /* create MQD for all compute queues as well as KIQ for SRIOV case */
2363         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2364         if (r)
2365                 return r;
2366
2367         adev->gfx.ce_ram_size = 0x8000;
2368
2369         r = gfx_v9_0_gpu_early_init(adev);
2370         if (r)
2371                 return r;
2372
2373         return 0;
2374 }
2375
2377 static int gfx_v9_0_sw_fini(void *handle)
2378 {
2379         int i;
2380         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2381
2382         amdgpu_gfx_ras_fini(adev);
2383
2384         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2385                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2386         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2387                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2388
2389         amdgpu_gfx_mqd_sw_fini(adev);
2390         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2391         amdgpu_gfx_kiq_fini(adev);
2392
2393         gfx_v9_0_mec_fini(adev);
2394         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2395         if (adev->flags & AMD_IS_APU) {
2396                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2397                                 &adev->gfx.rlc.cp_table_gpu_addr,
2398                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2399         }
2400         gfx_v9_0_free_microcode(adev);
2401
2402         return 0;
2403 }
2404
2406 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2407 {
2408         /* TODO */
2409 }
2410
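/* Route subsequent register accesses to a specific shader engine (SE),
 * shader array (SH) and instance via GRBM_GFX_INDEX; 0xffffffff selects
 * broadcast to all of them. Callers are expected to serialize through
 * adev->grbm_idx_mutex.
 */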
2411 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2412                            u32 instance)
2413 {
2414         u32 data;
2415
2416         if (instance == 0xffffffff)
2417                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2418         else
2419                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2420
2421         if (se_num == 0xffffffff)
2422                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2423         else
2424                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2425
2426         if (sh_num == 0xffffffff)
2427                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2428         else
2429                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2430
2431         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2432 }
2433
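/* Return a bitmap of render backends (RBs) active on the currently
 * selected SE/SH: combine the fused-off (CC) and user-disabled (GC_USER)
 * masks and invert the result.
 */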
2434 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2435 {
2436         u32 data, mask;
2437
2438         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2439         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2440
2441         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2442         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2443
2444         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2445                                          adev->gfx.config.max_sh_per_se);
2446
2447         return (~data) & mask;
2448 }
2449
2450 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2451 {
2452         int i, j;
2453         u32 data;
2454         u32 active_rbs = 0;
2455         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2456                                         adev->gfx.config.max_sh_per_se;
2457
2458         mutex_lock(&adev->grbm_idx_mutex);
2459         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2460                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2461                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2462                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2463                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2464                                                rb_bitmap_width_per_sh);
2465                 }
2466         }
2467         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2468         mutex_unlock(&adev->grbm_idx_mutex);
2469
2470         adev->gfx.config.backend_enable_mask = active_rbs;
2471         adev->gfx.config.num_rbs = hweight32(active_rbs);
2472 }
2473
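/* Program static SH_MEM apertures for the KFD/compute VMIDs and clear
 * their GDS/GWS/OA allocations; the firmware grants those to target
 * VMIDs later.
 */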
2474 #define DEFAULT_SH_MEM_BASES    (0x6000)
2475 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2476 {
2477         int i;
2478         uint32_t sh_mem_config;
2479         uint32_t sh_mem_bases;
2480
2481         /*
2482          * Configure apertures:
2483          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2484          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2485          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2486          */
2487         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2488
2489         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2490                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2491                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2492
2493         mutex_lock(&adev->srbm_mutex);
2494         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2495                 soc15_grbm_select(adev, 0, 0, 0, i);
2496                 /* CP and shaders */
2497                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2498                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2499         }
2500         soc15_grbm_select(adev, 0, 0, 0, 0);
2501         mutex_unlock(&adev->srbm_mutex);
2502
2503         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2504            access. These should be enabled by FW for target VMIDs. */
2505         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2506                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2507                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2508                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2509                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2510         }
2511 }
2512
2513 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2514 {
2515         int vmid;
2516
2517         /*
2518          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2519          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2520          * the driver can enable them for graphics. VMID0 should maintain
2521          * access so that HWS firmware can save/restore entries.
2522          */
2523         for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2524                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2525                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2526                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2527                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2528         }
2529 }
2530
2531 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2532 {
2533         uint32_t tmp;
2534
2535         switch (adev->asic_type) {
2536         case CHIP_ARCTURUS:
2537                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2538                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2539                                         DISABLE_BARRIER_WAITCNT, 1);
2540                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2541                 break;
2542         default:
2543                 break;
2544         }
2545 }
2546
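/* Program the "constant" hardware state: GRBM read timeout, RB and CU
 * bitmaps, and the per-VMID SH_MEM configuration (VMID 0 keeps base 0,
 * the others get the private/shared aperture bases), then set up the
 * compute VMIDs, GDS VMIDs and SQ config.
 */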
2547 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2548 {
2549         u32 tmp;
2550         int i;
2551
2552         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2553
2554         gfx_v9_0_tiling_mode_table_init(adev);
2555
2556         gfx_v9_0_setup_rb(adev);
2557         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2558         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2559
2560         /* XXX SH_MEM regs */
2561         /* where to put LDS, scratch, GPUVM in FSA64 space */
2562         mutex_lock(&adev->srbm_mutex);
2563         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2564                 soc15_grbm_select(adev, 0, 0, 0, i);
2565                 /* CP and shaders */
2566                 if (i == 0) {
2567                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2568                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2569                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2570                                             !!adev->gmc.noretry);
2571                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2572                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2573                 } else {
2574                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2575                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2576                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2577                                             !!adev->gmc.noretry);
2578                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2579                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2580                                 (adev->gmc.private_aperture_start >> 48));
2581                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2582                                 (adev->gmc.shared_aperture_start >> 48));
2583                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2584                 }
2585         }
2586         soc15_grbm_select(adev, 0, 0, 0, 0);
2587
2588         mutex_unlock(&adev->srbm_mutex);
2589
2590         gfx_v9_0_init_compute_vmid(adev);
2591         gfx_v9_0_init_gds_vmid(adev);
2592         gfx_v9_0_init_sq_config(adev);
2593 }
2594
2595 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2596 {
2597         u32 i, j, k;
2598         u32 mask;
2599
2600         mutex_lock(&adev->grbm_idx_mutex);
2601         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2602                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2603                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2604                         for (k = 0; k < adev->usec_timeout; k++) {
2605                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2606                                         break;
2607                                 udelay(1);
2608                         }
2609                         if (k == adev->usec_timeout) {
2610                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2611                                                       0xffffffff, 0xffffffff);
2612                                 mutex_unlock(&adev->grbm_idx_mutex);
2613                                 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2614                                          i, j);
2615                                 return;
2616                         }
2617                 }
2618         }
2619         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2620         mutex_unlock(&adev->grbm_idx_mutex);
2621
2622         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2623                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2624                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2625                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2626         for (k = 0; k < adev->usec_timeout; k++) {
2627                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2628                         break;
2629                 udelay(1);
2630         }
2631 }
2632
2633 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2634                                                bool enable)
2635 {
2636         u32 tmp;
2637
2638         /* don't toggle interrupts that are only applicable
2639          * to me0 pipe0 on ASICs that have me0 removed */
2640         if (!adev->gfx.num_gfx_rings)
2641                 return;
2642
2643         tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2644
2645         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2646         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2647         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2648         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2649
2650         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2651 }
2652
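/* Point the RLC at the clear-state indirect buffer (CSIB): program its
 * GPU address and length so the golden context clear state can be
 * fetched by the hardware.
 */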
2653 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2654 {
2655         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2656         /* csib */
2657         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2658                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2659         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2660                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2661         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2662                         adev->gfx.rlc.clear_state_size);
2663 }
2664
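/* Walk the RLC register-list-format blob: record the offset where each
 * indirect block starts and collect the set of unique indirect register
 * offsets referenced by the list.
 */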
2665 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2666                                 int indirect_offset,
2667                                 int list_size,
2668                                 int *unique_indirect_regs,
2669                                 int unique_indirect_reg_count,
2670                                 int *indirect_start_offsets,
2671                                 int *indirect_start_offsets_count,
2672                                 int max_start_offsets_count)
2673 {
2674         int idx;
2675
2676         for (; indirect_offset < list_size; indirect_offset++) {
2677                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2678                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2679                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2680
2681                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2682                         indirect_offset += 2;
2683
2684                         /* look for the matching index */
2685                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2686                                 if (unique_indirect_regs[idx] ==
2687                                         register_list_format[indirect_offset] ||
2688                                         !unique_indirect_regs[idx])
2689                                         break;
2690                         }
2691
2692                         BUG_ON(idx >= unique_indirect_reg_count);
2693
2694                         if (!unique_indirect_regs[idx])
2695                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2696
2697                         indirect_offset++;
2698                 }
2699         }
2700 }
2701
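/* Upload the RLC save/restore lists: the direct register restore table
 * goes into SRM ARAM, the register list format and the per-block start
 * offsets go into GPM scratch, and the unique indirect registers are
 * programmed into the SRM index control address/data pairs.
 */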
2702 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2703 {
2704         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2705         int unique_indirect_reg_count = 0;
2706
2707         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2708         int indirect_start_offsets_count = 0;
2709
2710         int list_size = 0;
2711         int i = 0, j = 0;
2712         u32 tmp = 0;
2713
2714         u32 *register_list_format =
2715                 kmemdup(adev->gfx.rlc.register_list_format,
2716                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2717         if (!register_list_format)
2718                 return -ENOMEM;
2719
2720         /* setup unique_indirect_regs array and indirect_start_offsets array */
2721         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2722         gfx_v9_1_parse_ind_reg_list(register_list_format,
2723                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2724                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2725                                     unique_indirect_regs,
2726                                     unique_indirect_reg_count,
2727                                     indirect_start_offsets,
2728                                     &indirect_start_offsets_count,
2729                                     ARRAY_SIZE(indirect_start_offsets));
2730
2731         /* enable auto-increment in case it is disabled */
2732         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2733         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2734         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2735
2736         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2737         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2738                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2739         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2740                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2741                         adev->gfx.rlc.register_restore[i]);
2742
2743         /* load indirect register */
2744         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2745                 adev->gfx.rlc.reg_list_format_start);
2746
2747         /* direct register portion */
2748         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2749                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2750                         register_list_format[i]);
2751
2752         /* indirect register portion */
2753         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2754                 if (register_list_format[i] == 0xFFFFFFFF) {
2755                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2756                         continue;
2757                 }
2758
2759                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2760                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2761
2762                 for (j = 0; j < unique_indirect_reg_count; j++) {
2763                         if (register_list_format[i] == unique_indirect_regs[j]) {
2764                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2765                                 break;
2766                         }
2767                 }
2768
2769                 BUG_ON(j >= unique_indirect_reg_count);
2770
2771                 i++;
2772         }
2773
2774         /* set save/restore list size */
2775         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2776         list_size = list_size >> 1;
2777         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2778                 adev->gfx.rlc.reg_restore_list_size);
2779         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2780
2781         /* write the starting offsets to RLC scratch ram */
2782         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2783                 adev->gfx.rlc.starting_offsets_start);
2784         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2785                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2786                        indirect_start_offsets[i]);
2787
2788         /* load unique indirect regs */
2789         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2790                 if (unique_indirect_regs[i] != 0) {
2791                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2792                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2793                                unique_indirect_regs[i] & 0x3FFFF);
2794
2795                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2796                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2797                                unique_indirect_regs[i] >> 20);
2798                 }
2799         }
2800
2801         kfree(register_list_format);
2802         return 0;
2803 }
2804
2805 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2806 {
2807         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2808 }
2809
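/* Hand control of coarse-grain power gating (CGPG) to the GFX IP (RLC),
 * or give it back, via the PWR_MISC_CNTL_STATUS register.
 */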
2810 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2811                                              bool enable)
2812 {
2813         uint32_t data = 0;
2814         uint32_t default_data = 0;
2815
2816         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2817         if (enable) {
2818                 /* enable GFXIP control over CGPG */
2819                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2820                 if (default_data != data)
2821                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2822
2823                 /* update status */
2824                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2825                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2826                 if (default_data != data)
2827                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2828         } else {
2829                 /* restore GFXIP control over CGPG */
2830                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2831                 if (default_data != data)
2832                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2833         }
2834 }
2835
2836 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2837 {
2838         uint32_t data = 0;
2839
2840         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2841                               AMD_PG_SUPPORT_GFX_SMG |
2842                               AMD_PG_SUPPORT_GFX_DMG)) {
2843                 /* init IDLE_POLL_COUNT = 60 */
2844                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2845                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2846                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2847                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2848
2849                 /* init RLC PG Delay */
2850                 data = 0;
2851                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2852                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2853                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2854                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2855                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2856
2857                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2858                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2859                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2860                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2861
2862                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2863                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2864                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2865                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2866
2867                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2868                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2869
2870                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2871                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2872                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2873                 if (adev->asic_type != CHIP_RENOIR)
2874                         pwr_10_0_gfxip_control_over_cgpg(adev, true);
2875         }
2876 }
2877
2878 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2879                                                 bool enable)
2880 {
2881         uint32_t data = 0;
2882         uint32_t default_data = 0;
2883
2884         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2885         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2886                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2887                              enable ? 1 : 0);
2888         if (default_data != data)
2889                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2890 }
2891
2892 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2893                                                 bool enable)
2894 {
2895         uint32_t data = 0;
2896         uint32_t default_data = 0;
2897
2898         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2899         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2900                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2901                              enable ? 1 : 0);
2902         if (default_data != data)
2903                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2904 }
2905
2906 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2907                                         bool enable)
2908 {
2909         uint32_t data = 0;
2910         uint32_t default_data = 0;
2911
2912         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2913         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2914                              CP_PG_DISABLE,
2915                              enable ? 0 : 1);
2916         if (default_data != data)
2917                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2918 }
2919
2920 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2921                                                 bool enable)
2922 {
2923         uint32_t data, default_data;
2924
2925         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2926         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2927                              GFX_POWER_GATING_ENABLE,
2928                              enable ? 1 : 0);
2929         if (default_data != data)
2930                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2931 }
2932
2933 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2934                                                 bool enable)
2935 {
2936         uint32_t data, default_data;
2937
2938         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2939         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2940                              GFX_PIPELINE_PG_ENABLE,
2941                              enable ? 1 : 0);
2942         if (default_data != data)
2943                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2944
2945         if (!enable)
2946                 /* read any GFX register to wake up GFX */
2947                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2948 }
2949
2950 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2951                                                        bool enable)
2952 {
2953         uint32_t data, default_data;
2954
2955         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2956         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2957                              STATIC_PER_CU_PG_ENABLE,
2958                              enable ? 1 : 0);
2959         if (default_data != data)
2960                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2961 }
2962
2963 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2964                                                 bool enable)
2965 {
2966         uint32_t data, default_data;
2967
2968         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2969         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2970                              DYN_PER_CU_PG_ENABLE,
2971                              enable ? 1 : 0);
2972         if (default_data != data)
2973                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2974 }
2975
2976 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2977 {
2978         gfx_v9_0_init_csb(adev);
2979
2980         /*
2981          * The RLC save/restore list is supported from RLC v2_1
2982          * onwards, and it is required by the gfxoff feature.
2983          */
2984         if (adev->gfx.rlc.is_rlc_v2_1) {
2985                 if (adev->asic_type == CHIP_VEGA12 ||
2986                     (adev->apu_flags & AMD_APU_IS_RAVEN2))
2987                         gfx_v9_1_init_rlc_save_restore_list(adev);
2988                 gfx_v9_0_enable_save_restore_machine(adev);
2989         }
2990
2991         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2992                               AMD_PG_SUPPORT_GFX_SMG |
2993                               AMD_PG_SUPPORT_GFX_DMG |
2994                               AMD_PG_SUPPORT_CP |
2995                               AMD_PG_SUPPORT_GDS |
2996                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2997                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2998                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2999                 gfx_v9_0_init_gfx_power_gating(adev);
3000         }
3001 }
3002
3003 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3004 {
3005         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3006         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3007         gfx_v9_0_wait_for_rlc_serdes(adev);
3008 }
3009
3010 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3011 {
3012         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3013         udelay(50);
3014         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3015         udelay(50);
3016 }
3017
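/* Turn the RLC F32 core on; dGPUs also re-enable the GUI idle interrupt
 * here, while APUs do it later, after the CP is initialized.
 */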
3018 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3019 {
3020 #ifdef AMDGPU_RLC_DEBUG_RETRY
3021         u32 rlc_ucode_ver;
3022 #endif
3023
3024         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3025         udelay(50);
3026
3027         /* APUs (e.g. Carrizo) enable the CP interrupt only after the CP is initialized */
3028         if (!(adev->flags & AMD_IS_APU)) {
3029                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3030                 udelay(50);
3031         }
3032
3033 #ifdef AMDGPU_RLC_DEBUG_RETRY
3034         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3035         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3036         if (rlc_ucode_ver == 0x108) {
3037                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3038                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3039                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3040                  * default is 0x9C4 to create a 100us interval */
3041                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3042                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3043                  * to disable the page fault retry interrupts, default is
3044                  * 0x100 (256) */
3045                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3046         }
3047 #endif
3048 }
3049
3050 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3051 {
3052         const struct rlc_firmware_header_v2_0 *hdr;
3053         const __le32 *fw_data;
3054         unsigned i, fw_size;
3055
3056         if (!adev->gfx.rlc_fw)
3057                 return -EINVAL;
3058
3059         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3060         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3061
3062         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3063                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3064         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3065
3066         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3067                         RLCG_UCODE_LOADING_START_ADDRESS);
3068         for (i = 0; i < fw_size; i++)
3069                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3070         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3071
3072         return 0;
3073 }
3074
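/* Bring the RLC up from scratch: under SR-IOV only the CSIB needs
 * programming; otherwise stop the RLC, disable CGCG, set up power
 * gating, load the RLC microcode if PSP isn't doing it, configure LBPW
 * where applicable, then restart the RLC.
 */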
3075 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3076 {
3077         int r;
3078
3079         if (amdgpu_sriov_vf(adev)) {
3080                 gfx_v9_0_init_csb(adev);
3081                 return 0;
3082         }
3083
3084         adev->gfx.rlc.funcs->stop(adev);
3085
3086         /* disable CG */
3087         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3088
3089         gfx_v9_0_init_pg(adev);
3090
3091         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3092                 /* legacy rlc firmware loading */
3093                 r = gfx_v9_0_rlc_load_microcode(adev);
3094                 if (r)
3095                         return r;
3096         }
3097
3098         switch (adev->asic_type) {
3099         case CHIP_RAVEN:
3100                 if (amdgpu_lbpw == 0)
3101                         gfx_v9_0_enable_lbpw(adev, false);
3102                 else
3103                         gfx_v9_0_enable_lbpw(adev, true);
3104                 break;
3105         case CHIP_VEGA20:
3106                 if (amdgpu_lbpw > 0)
3107                         gfx_v9_0_enable_lbpw(adev, true);
3108                 else
3109                         gfx_v9_0_enable_lbpw(adev, false);
3110                 break;
3111         default:
3112                 break;
3113         }
3114
3115         adev->gfx.rlc.funcs->start(adev);
3116
3117         return 0;
3118 }
3119
3120 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3121 {
3122         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3123
3124         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3125         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3126         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3127         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3128         udelay(50);
3129 }
3130
3131 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3132 {
3133         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3134         const struct gfx_firmware_header_v1_0 *ce_hdr;
3135         const struct gfx_firmware_header_v1_0 *me_hdr;
3136         const __le32 *fw_data;
3137         unsigned i, fw_size;
3138
3139         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3140                 return -EINVAL;
3141
3142         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3143                 adev->gfx.pfp_fw->data;
3144         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3145                 adev->gfx.ce_fw->data;
3146         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3147                 adev->gfx.me_fw->data;
3148
3149         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3150         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3151         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3152
3153         gfx_v9_0_cp_gfx_enable(adev, false);
3154
3155         /* PFP */
3156         fw_data = (const __le32 *)
3157                 (adev->gfx.pfp_fw->data +
3158                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3159         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3160         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3161         for (i = 0; i < fw_size; i++)
3162                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3163         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3164
3165         /* CE */
3166         fw_data = (const __le32 *)
3167                 (adev->gfx.ce_fw->data +
3168                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3169         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3170         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3171         for (i = 0; i < fw_size; i++)
3172                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3173         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3174
3175         /* ME */
3176         fw_data = (const __le32 *)
3177                 (adev->gfx.me_fw->data +
3178                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3179         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3180         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3181         for (i = 0; i < fw_size; i++)
3182                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3183         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3184
3185         return 0;
3186 }
3187
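/* Prime the gfx ring with the clear-state preamble: emit the golden
 * context register values from gfx9_cs_data, a CLEAR_STATE packet and
 * the CE partition bases, then commit the ring.
 */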
3188 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3189 {
3190         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3191         const struct cs_section_def *sect = NULL;
3192         const struct cs_extent_def *ext = NULL;
3193         int r, i, tmp;
3194
3195         /* init the CP */
3196         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3197         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3198
3199         gfx_v9_0_cp_gfx_enable(adev, true);
3200
3201         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3202         if (r) {
3203                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3204                 return r;
3205         }
3206
3207         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3208         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3209
3210         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3211         amdgpu_ring_write(ring, 0x80000000);
3212         amdgpu_ring_write(ring, 0x80000000);
3213
3214         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3215                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3216                         if (sect->id == SECT_CONTEXT) {
3217                                 amdgpu_ring_write(ring,
3218                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3219                                                ext->reg_count));
3220                                 amdgpu_ring_write(ring,
3221                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3222                                 for (i = 0; i < ext->reg_count; i++)
3223                                         amdgpu_ring_write(ring, ext->extent[i]);
3224                         }
3225                 }
3226         }
3227
3228         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3229         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3230
3231         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3232         amdgpu_ring_write(ring, 0);
3233
3234         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3235         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3236         amdgpu_ring_write(ring, 0x8000);
3237         amdgpu_ring_write(ring, 0x8000);
3238
3239         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3240         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3241                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3242         amdgpu_ring_write(ring, tmp);
3243         amdgpu_ring_write(ring, 0);
3244
3245         amdgpu_ring_commit(ring);
3246
3247         return 0;
3248 }
3249
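/* Restore the gfx ring 0 hardware state: ring buffer size and base,
 * read/write pointer writeback addresses and the doorbell range, then
 * kick the ring off with the clear-state preamble.
 */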
3250 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3251 {
3252         struct amdgpu_ring *ring;
3253         u32 tmp;
3254         u32 rb_bufsz;
3255         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3256
3257         /* Set the write pointer delay */
3258         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3259
3260         /* set the RB to use vmid 0 */
3261         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3262
3263         /* Set ring buffer size */
3264         ring = &adev->gfx.gfx_ring[0];
3265         rb_bufsz = order_base_2(ring->ring_size / 8);
3266         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3267         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3268 #ifdef __BIG_ENDIAN
3269         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3270 #endif
3271         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3272
3273         /* Initialize the ring buffer's write pointers */
3274         ring->wptr = 0;
3275         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3276         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3277
3278         /* set the wb address whether it's enabled or not */
3279         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3280         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3281         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3282
3283         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3284         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3285         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3286
3287         mdelay(1);
3288         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3289
3290         rb_addr = ring->gpu_addr >> 8;
3291         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3292         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3293
3294         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3295         if (ring->use_doorbell) {
3296                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3297                                     DOORBELL_OFFSET, ring->doorbell_index);
3298                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3299                                     DOORBELL_EN, 1);
3300         } else {
3301                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3302         }
3303         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3304
3305         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3306                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3307         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3308
3309         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3310                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3311
3313         /* start the ring */
3314         gfx_v9_0_cp_gfx_start(adev);
3315         ring->sched.ready = true;
3316
3317         return 0;
3318 }
3319
3320 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3321 {
3322         if (enable) {
3323                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3324         } else {
3325                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3326                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3327                 adev->gfx.kiq.ring.sched.ready = false;
3328         }
3329         udelay(50);
3330 }
3331
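/* With the compute CP halted, point the CPC instruction cache at the
 * MEC firmware copy in VRAM and write the MEC1 jump table through the
 * UCODE_ADDR/DATA register pair.
 */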
3332 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3333 {
3334         const struct gfx_firmware_header_v1_0 *mec_hdr;
3335         const __le32 *fw_data;
3336         unsigned i;
3337         u32 tmp;
3338
3339         if (!adev->gfx.mec_fw)
3340                 return -EINVAL;
3341
3342         gfx_v9_0_cp_compute_enable(adev, false);
3343
3344         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3345         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3346
3347         fw_data = (const __le32 *)
3348                 (adev->gfx.mec_fw->data +
3349                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3350         tmp = 0;
3351         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3352         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3353         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3354
3355         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3356                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3357         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3358                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3359
3360         /* MEC1 */
3361         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3362                          mec_hdr->jt_offset);
3363         for (i = 0; i < mec_hdr->jt_size; i++)
3364                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3365                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3366
3367         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3368                         adev->gfx.mec_fw_version);
3369         /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3370
3371         return 0;
3372 }
3373
3374 /* KIQ functions */
3375 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3376 {
3377         uint32_t tmp;
3378         struct amdgpu_device *adev = ring->adev;
3379
3380         /* tell the RLC which queue is the KIQ */
3381         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3382         tmp &= 0xffffff00;
3383         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3384         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3385         tmp |= 0x80;
3386         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3387 }
3388
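/* For compute queues flagged high priority, raise both the HQD pipe
 * priority and the queue priority in the MQD.
 */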
3389 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3390 {
3391         struct amdgpu_device *adev = ring->adev;
3392
3393         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3394                 if (amdgpu_gfx_is_high_priority_compute_queue(adev,
3395                                                               ring->pipe,
3396                                                               ring->queue)) {
3397                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3398                         mqd->cp_hqd_queue_priority =
3399                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3400                 }
3401         }
3402 }
3403
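/* Fill the memory queue descriptor (MQD) with the complete HQD register
 * image for this ring: EOP buffer, doorbell, ring base/size, pointer
 * writeback addresses and priority. The CP consumes this image when the
 * queue is mapped.
 */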
3404 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3405 {
3406         struct amdgpu_device *adev = ring->adev;
3407         struct v9_mqd *mqd = ring->mqd_ptr;
3408         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3409         uint32_t tmp;
3410
3411         mqd->header = 0xC0310800;
3412         mqd->compute_pipelinestat_enable = 0x00000001;
3413         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3414         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3415         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3416         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3417         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3418         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3419         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3420         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3421         mqd->compute_misc_reserved = 0x00000003;
3422
3423         mqd->dynamic_cu_mask_addr_lo =
3424                 lower_32_bits(ring->mqd_gpu_addr
3425                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3426         mqd->dynamic_cu_mask_addr_hi =
3427                 upper_32_bits(ring->mqd_gpu_addr
3428                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3429
3430         eop_base_addr = ring->eop_gpu_addr >> 8;
3431         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3432         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3433
3434         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3435         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3436         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3437                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3438
3439         mqd->cp_hqd_eop_control = tmp;
3440
3441         /* enable doorbell? */
3442         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3443
3444         if (ring->use_doorbell) {
3445                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3446                                     DOORBELL_OFFSET, ring->doorbell_index);
3447                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3448                                     DOORBELL_EN, 1);
3449                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3450                                     DOORBELL_SOURCE, 0);
3451                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3452                                     DOORBELL_HIT, 0);
3453         } else {
3454                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3455                                          DOORBELL_EN, 0);
3456         }
3457
3458         mqd->cp_hqd_pq_doorbell_control = tmp;
3459
3460         /* disable the queue if it's active */
3461         ring->wptr = 0;
3462         mqd->cp_hqd_dequeue_request = 0;
3463         mqd->cp_hqd_pq_rptr = 0;
3464         mqd->cp_hqd_pq_wptr_lo = 0;
3465         mqd->cp_hqd_pq_wptr_hi = 0;
3466
3467         /* set the pointer to the MQD */
3468         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3469         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3470
3471         /* set MQD vmid to 0 */
3472         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3473         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3474         mqd->cp_mqd_control = tmp;
3475
3476         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;
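	/*
	 * QUEUE_SIZE uses the same 2^(n+1)-dwords encoding as EOP_SIZE
	 * above: e.g. (illustrative) a 256 KiB ring is 65536 dwords, so
	 * order_base_2(65536) - 1 = 15 would be programmed.
	 */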

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;
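	/*
	 * Note that tmp was rebuilt from zero here rather than read back,
	 * so when the ring has no doorbell the MQD records the doorbell as
	 * fully disabled.
	 */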

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a queue/ring */
	gfx_v9_0_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);

	/* the map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}

static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

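	/*
	 * Deactivation handshake: writing 1 to CP_HQD_DEQUEUE_REQUEST asks
	 * the CP to drain the selected queue, then CP_HQD_ACTIVE is polled
	 * (1 us per iteration, bounded by adev->usec_timeout) until the
	 * hardware acknowledges by clearing the active bit.
	 */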
	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
		       mqd->cp_hqd_dequeue_request);
		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
		       mqd->cp_hqd_pq_rptr);
		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		       mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		       mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
	       mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
	       mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
	       mqd->cp_mqd_control);

	/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
	       mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
	       mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
	       mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
				mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
				mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
	       mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
					(adev->doorbell_index.kiq * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
					(adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
	       mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
	       mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
	       mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
	       mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int j;

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {

		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}

		/* compare against the loop bound above, not
		 * AMDGPU_MAX_USEC_TIMEOUT, which may differ from the
		 * per-device timeout
		 */
		if (j == adev->usec_timeout) {
			DRM_DEBUG("KIQ dequeue request failed.\n");

			/* Manual disable if dequeue request times out */
			WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
		}

		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
	}

	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);

	return 0;
}

static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v9_0_kiq_setting(ring);

	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
	}

	return 0;
}

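/*
 * Unlike the KIQ above, which gfx_v9_0_kiq_init_queue() programs into the
 * hardware directly via gfx_v9_0_kiq_init_register(), the user compute
 * queues below only have their MQDs prepared here; they are mapped onto
 * hardware later through KIQ MAP_QUEUES packets (see
 * amdgpu_gfx_enable_kcq()).
 */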
static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		/* don't leak the reservation on kmap failure */
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v9_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v9_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev);
done:
	return r;
}

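/*
 * CP bring-up order: load microcode unless the PSP already handles it,
 * start the KIQ first (it services queue mapping for the others), then
 * the legacy gfx ring, then the user compute queues, and finally
 * ring-test everything.
 */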
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.num_gfx_rings) {
			/* legacy firmware loading */
			r = gfx_v9_0_cp_gfx_load_microcode(adev);
			if (r)
				return r;
		}

		r = gfx_v9_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v9_0_kiq_resume(adev);
	if (r)
		return r;

	if (adev->gfx.num_gfx_rings) {
		r = gfx_v9_0_cp_gfx_resume(adev);
		if (r)
			return r;
	}

	r = gfx_v9_0_kcq_resume(adev);
	if (r)
		return r;

	if (adev->gfx.num_gfx_rings) {
		ring = &adev->gfx.gfx_ring[0];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		amdgpu_ring_test_helper(ring);
	}

	gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->asic_type != CHIP_ARCTURUS)
		return;

	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
				adev->df.hash_status.hash_64k);
	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
				adev->df.hash_status.hash_2m);
	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
				adev->df.hash_status.hash_1g);
	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
}

static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	if (adev->gfx.num_gfx_rings)
		gfx_v9_0_cp_gfx_enable(adev, enable);
	gfx_v9_0_cp_compute_enable(adev, enable);
}

static int gfx_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfx_v9_0_init_golden_registers(adev);

	gfx_v9_0_constants_init(adev);

	gfx_v9_0_init_tcp_config(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_cp_resume(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	/* DF freeze and kcq disable will fail after a RAS fatal error
	 * interrupt has been triggered, so skip the KCQ disable then
	 */
	if (!amdgpu_ras_intr_triggered())
		/* disable KCQ to avoid the CPC touching memory that is no longer valid */
		amdgpu_gfx_disable_kcq(adev);

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_cp_gfx_enable(adev, false);
		/* polling must be disabled for SR-IOV once the hw has
		 * finished, otherwise the CPC engine may keep fetching a
		 * WB address that is no longer valid after the sw side has
		 * finished, triggering DMAR read errors on the hypervisor
		 * side.
		 */
		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
		return 0;
	}

	/* Use the deinitialization sequence from CAIL when unbinding the
	 * device from the driver, otherwise the KIQ hangs when binding back.
	 */
	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
				adev->gfx.kiq.ring.pipe,
				adev->gfx.kiq.ring.queue, 0);
		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	gfx_v9_0_cp_enable(adev, false);
	adev->gfx.rlc.funcs->stop(adev);

	return 0;
}

static int gfx_v9_0_suspend(void *handle)
{
	return gfx_v9_0_hw_fini(handle);
}

static int gfx_v9_0_resume(void *handle)
{
	return gfx_v9_0_hw_init(handle);
}

static bool gfx_v9_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
			      GRBM_STATUS, GUI_ACTIVE);
}

static int gfx_v9_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v9_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v9_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		adev->gfx.rlc.funcs->stop(adev);

		if (adev->gfx.num_gfx_rings)
			/* Disable GFX parsing/prefetching */
			gfx_v9_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v9_0_cp_compute_enable(adev, false);

		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0;
	uint64_t value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 9 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 16) |	/* count sel */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't keep waiting in the gpu-reset case: doing so can block the
	 * gpu_recover() routine forever, e.g. this virt_kiq_rreg path is
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
	 * never return while we keep waiting here, hanging gpu_recover().
	 *
	 * likewise, don't keep waiting when called from IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = (uint64_t)adev->wb.wb[reg_val_offs] |
		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	pr_err("failed to read gpu clock\n");
	return ~0;
}

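/*
 * GFXOFF is disabled and gpu_clock_mutex is held around the read below so
 * that the RLC clock-capture registers are powered up and the LSB/MSB
 * pair is sampled consistently with respect to other readers.  On Vega10
 * under SR-IOV runtime the registers are not directly accessible, so the
 * value is fetched through the KIQ instead.
 */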
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->gfx.gpu_clock_mutex);
	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
		clock = gfx_v9_0_kiq_read_clock(adev);
	} else {
		WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
		clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
			((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	}
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	amdgpu_gfx_off_ctrl(adev, true);
	return clock;
}

static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
				   gds_base);

	/* GDS Size */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
				   gds_size);

	/* GWS */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

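	/*
	 * The OA value below is a contiguous bitmask of oa_size bits
	 * starting at bit oa_base.  Worked example (illustrative):
	 * oa_base = 4 and oa_size = 2 give
	 * (1 << 6) - (1 << 4) = 0x40 - 0x10 = 0x30, i.e. bits 4 and 5 set.
	 */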
	/* OA */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
				   (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static const u32 vgpr_init_compute_shader[] =
{
	0xb07c0000, 0xbe8000ff,
	0x000000f8, 0xbf110800,
	0x7e000280, 0x7e020280,
	0x7e040280, 0x7e060280,
	0x7e080280, 0x7e0a0280,
	0x7e0c0280, 0x7e0e0280,
	0x80808800, 0xbe803200,
	0xbf84fff5, 0xbf9c0000,
	0xd28c0001, 0x0001007f,
	0xd28d0001, 0x0002027e,
	0x10020288, 0xb8810904,
	0xb7814000, 0xd1196a01,
	0x00000301, 0xbe800087,
	0xbefc00c1, 0xd89c4000,
	0x00020201, 0xd89cc080,
	0x00040401, 0x320202ff,
	0x00000800, 0x80808100,
	0xbf84fff8, 0x7e020280,
	0xbf810000, 0x00000000,
};

static const u32 sgpr_init_compute_shader[] =
{
	0xb07c0000, 0xbe8000ff,
	0x0000005f, 0xbee50080,
	0xbe812c65, 0xbe822c65,
	0xbe832c65, 0xbe842c65,
	0xbe852c65, 0xb77c0005,
	0x80808500, 0xbf84fff8,
	0xbe800080, 0xbf810000,
};

static const u32 vgpr_init_compute_shader_arcturus[] = {
	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
	0xbf84fff8, 0xbf810000,
};

/* When the register arrays below are changed, please update gpr_reg_size
 * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
 * so that all gfx9 ASICs are covered.
 */
static const struct soc15_reg_entry vgpr_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

static const struct soc15_reg_entry sgpr1_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};

static const struct soc15_reg_entry sgpr2_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};

static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
   { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
};

static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	int i, r;

	/* only support when RAS is enabled */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return 0;

	r = amdgpu_ring_alloc(ring, 7);
	if (r) {
		DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
			ring->name, r);
		return r;
	}

	WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);

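	/*
	 * The DMA_DATA packet below writes zeros across the whole GDS
	 * (dst_sel 1 = GDS, src_sel 2 = immediate data, as we read the
	 * packet encoding), so every GDS location is written once and its
	 * ECC state initialized; the VMID0 base/size window opened above
	 * spans the full GDS.
	 */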
	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2) |
				PACKET3_DMA_DATA_ENGINE(0)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gds.gds_size);

	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);

	return r;
}

static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r, i;
	unsigned total_size, vgpr_offset, sgpr_offset;
	u64 gpu_addr;

	int compute_dim_x = adev->gfx.config.max_shader_engines *
						adev->gfx.config.max_cu_per_sh *
						adev->gfx.config.max_sh_per_se;
	int sgpr_work_group_size = 5;
	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
	int vgpr_init_shader_size;
	const u32 *vgpr_init_shader_ptr;
	const struct soc15_reg_entry *vgpr_init_regs_ptr;

	/* only support when RAS is enabled */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		return 0;

	/* bail if the compute ring is not ready */
	if (!ring->sched.ready)
		return 0;

	if (adev->asic_type == CHIP_ARCTURUS) {
		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
	} else {
		vgpr_init_shader_ptr = vgpr_init_compute_shader;
		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
		vgpr_init_regs_ptr = vgpr_init_regs;
	}

	total_size =
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
	total_size +=
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
	total_size +=
		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
	total_size = ALIGN(total_size, 256);
	vgpr_offset = total_size;
	total_size += ALIGN(vgpr_init_shader_size, 256);
	sgpr_offset = total_size;
	total_size += sizeof(sgpr_init_compute_shader);
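	/*
	 * Resulting IB layout (illustrative):
	 *
	 *   [0, vgpr_offset)   packet stream for the three dispatches
	 *   [vgpr_offset, ...) VGPR init shader, 256-byte aligned
	 *   [sgpr_offset, ...) SGPR init shader
	 *
	 * Each dispatch block is gpr_reg_size SET_SH_REG writes of 3 dwords
	 * each, plus 4 dwords for COMPUTE_PGM_LO/HI, 5 for DISPATCH_DIRECT
	 * and 2 for the CS partial flush event, matching the total_size
	 * arithmetic above.
	 */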

	/* allocate an indirect buffer to put the commands in */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, total_size,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	/* load the compute shaders */
	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];

	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
	for (i = 0; i < gpr_reg_size; i++) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
								- PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
							- PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
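	/*
	 * compute_dim_x is the chip-wide CU count (SEs * SHs/SE * CUs/SH),
	 * so dispatching compute_dim_x * 2 threadgroups in x is intended
	 * to land work on every CU and touch all VGPRs across the chip.
	 */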

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < gpr_reg_size; i++) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
								- PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
							- PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < gpr_reg_size; i++) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
								- PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
							- PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* schedule the ib on the ring */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r) {
		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
		goto fail;
	}

	/* wait for the GPU to finish processing the IB */
	r = dma_fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto fail;
	}

fail:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);

	return r;
}

static int gfx_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		adev->gfx.num_gfx_rings = 0;
	else
		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	gfx_v9_0_set_kiq_pm4_funcs(adev);
	gfx_v9_0_set_ring_funcs(adev);
	gfx_v9_0_set_irq_funcs(adev);
	gfx_v9_0_set_gds_init(adev);
	gfx_v9_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v9_0_ecc_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/*
	 * Temporary workaround for an issue where the CP firmware fails to
	 * update the read pointer while CPDMA writes the clearing operation
	 * to GDS during the suspend/resume sequence on several cards, so
	 * limit this operation to the cold-boot sequence.
	 */
4668         if (!adev->in_suspend) {
4669                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4670                 if (r)
4671                         return r;
4672         }
4673
4674         /* requires IBs so do in late init after IB pool is initialized */
4675         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4676         if (r)
4677                 return r;
4678
4679         if (adev->gfx.funcs &&
4680             adev->gfx.funcs->reset_ras_error_count)
4681                 adev->gfx.funcs->reset_ras_error_count(adev);
4682
4683         r = amdgpu_gfx_ras_late_init(adev);
4684         if (r)
4685                 return r;
4686
4687         return 0;
4688 }
4689
4690 static int gfx_v9_0_late_init(void *handle)
4691 {
4692         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4693         int r;
4694
4695         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4696         if (r)
4697                 return r;
4698
4699         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4700         if (r)
4701                 return r;
4702
4703         r = gfx_v9_0_ecc_late_init(handle);
4704         if (r)
4705                 return r;
4706
4707         return 0;
4708 }
4709
4710 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4711 {
4712         uint32_t rlc_setting;
4713
        /* if the RLC_ENABLE_F32 bit is clear, the RLC is disabled */
4715         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4716         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4717                 return false;
4718
4719         return true;
4720 }
4721
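/*
 * Ask the RLC firmware to enter safe mode: write RLC_SAFE_MODE with CMD=1
 * and MESSAGE=1, then poll until the firmware acknowledges by clearing the
 * CMD field (bounded by adev->usec_timeout). gfx_v9_0_unset_safe_mode()
 * below requests exit by writing CMD with MESSAGE=0 and does not wait for
 * the acknowledgement.
 */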
4722 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4723 {
4724         uint32_t data;
4725         unsigned i;
4726
4727         data = RLC_SAFE_MODE__CMD_MASK;
4728         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4729         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4730
4731         /* wait for RLC_SAFE_MODE */
4732         for (i = 0; i < adev->usec_timeout; i++) {
4733                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4734                         break;
4735                 udelay(1);
4736         }
4737 }
4738
4739 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4740 {
4741         uint32_t data;
4742
4743         data = RLC_SAFE_MODE__CMD_MASK;
4744         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4745 }
4746
4747 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4748                                                 bool enable)
4749 {
4750         amdgpu_gfx_rlc_enter_safe_mode(adev);
4751
4752         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4753                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4754                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4755                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4756         } else {
4757                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4758                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4759                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4760         }
4761
4762         amdgpu_gfx_rlc_exit_safe_mode(adev);
4763 }
4764
4765 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4766                                                 bool enable)
4767 {
4768         /* TODO: double check if we need to perform under safe mode */
4769         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4770
4771         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4772                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4773         else
4774                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4775
4776         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4777                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4778         else
4779                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4780
4781         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4782 }
4783
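/*
 * The clock-gating update helpers below all follow the same
 * read-modify-write pattern: snapshot the register into def, adjust data,
 * and write back only when the value actually changed, which avoids
 * redundant register traffic while in RLC safe mode.
 */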
4784 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4785                                                       bool enable)
4786 {
4787         uint32_t data, def;
4788
4789         amdgpu_gfx_rlc_enter_safe_mode(adev);
4790
4791         /* It is disabled by HW by default */
4792         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4793                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4794                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4795
4796                 if (adev->asic_type != CHIP_VEGA12)
4797                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4798
4799                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4800                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4801                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4802
4803                 /* only for Vega10 & Raven1 */
4804                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4805
4806                 if (def != data)
4807                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4808
4809                 /* MGLS is a global flag to control all MGLS in GFX */
4810                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4811                         /* 2 - RLC memory Light sleep */
4812                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4813                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4814                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4815                                 if (def != data)
4816                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4817                         }
4818                         /* 3 - CP memory Light sleep */
4819                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4820                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4821                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4822                                 if (def != data)
4823                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4824                         }
4825                 }
4826         } else {
4827                 /* 1 - MGCG_OVERRIDE */
4828                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4829
4830                 if (adev->asic_type != CHIP_VEGA12)
4831                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4832
4833                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4834                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4835                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4836                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4837
4838                 if (def != data)
4839                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4840
4841                 /* 2 - disable MGLS in RLC */
4842                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4843                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4844                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4845                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4846                 }
4847
4848                 /* 3 - disable MGLS in CP */
4849                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4850                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4851                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4852                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4853                 }
4854         }
4855
4856         amdgpu_gfx_rlc_exit_safe_mode(adev);
4857 }
4858
4859 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4860                                            bool enable)
4861 {
4862         uint32_t data, def;
4863
4864         if (adev->asic_type == CHIP_ARCTURUS)
4865                 return;
4866
4867         amdgpu_gfx_rlc_enter_safe_mode(adev);
4868
4869         /* Enable 3D CGCG/CGLS */
4870         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4871                 /* write cmd to clear cgcg/cgls ov */
4872                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4873                 /* unset CGCG override */
4874                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4875                 /* update CGCG and CGLS override bits */
4876                 if (def != data)
4877                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4878
4879                 /* enable 3Dcgcg FSM(0x0000363f) */
4880                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4881
4882                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4883                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4884                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4885                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4886                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4887                 if (def != data)
4888                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4889
4890                 /* set IDLE_POLL_COUNT(0x00900100) */
4891                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4892                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4893                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4894                 if (def != data)
4895                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4896         } else {
4897                 /* Disable CGCG/CGLS */
4898                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4899                 /* disable cgcg, cgls should be disabled */
4900                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4901                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4902                 /* disable cgcg and cgls in FSM */
4903                 if (def != data)
4904                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4905         }
4906
4907         amdgpu_gfx_rlc_exit_safe_mode(adev);
4908 }
4909
4910 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4911                                                       bool enable)
4912 {
4913         uint32_t def, data;
4914
4915         amdgpu_gfx_rlc_enter_safe_mode(adev);
4916
4917         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4918                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4919                 /* unset CGCG override */
4920                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4921                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4922                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4923                 else
4924                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4925                 /* update CGCG and CGLS override bits */
4926                 if (def != data)
4927                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4928
4929                 /* enable cgcg FSM(0x0000363F) */
4930                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4931
4932                 if (adev->asic_type == CHIP_ARCTURUS)
4933                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4934                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4935                 else
4936                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4937                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4938                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4939                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4940                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4941                 if (def != data)
4942                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4943
4944                 /* set IDLE_POLL_COUNT(0x00900100) */
4945                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4946                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4947                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4948                 if (def != data)
4949                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4950         } else {
4951                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4952                 /* reset CGCG/CGLS bits */
4953                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4954                 /* disable cgcg and cgls in FSM */
4955                 if (def != data)
4956                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4957         }
4958
4959         amdgpu_gfx_rlc_exit_safe_mode(adev);
4960 }
4961
4962 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4963                                             bool enable)
4964 {
4965         if (enable) {
4966                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4967                  * ===  MGCG + MGLS ===
4968                  */
4969                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4970                 /* ===  CGCG /CGLS for GFX 3D Only === */
4971                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4972                 /* ===  CGCG + CGLS === */
4973                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4974         } else {
4975                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4976                  * ===  CGCG + CGLS ===
4977                  */
4978                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4979                 /* ===  CGCG /CGLS for GFX 3D Only === */
4980                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4981                 /* ===  MGCG + MGLS === */
4982                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4983         }
4984         return 0;
4985 }
4986
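/*
 * Steer RLC SPM (streaming performance monitor) memory traffic to the
 * given VMID. Under SR-IOV "one VF per PF" mode the register is accessed
 * with the NO_KIQ variants, presumably because routing the access through
 * the KIQ is unnecessary in that configuration.
 */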
4987 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4988 {
4989         u32 reg, data;
4990
4991         reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
4992         if (amdgpu_sriov_is_pp_one_vf(adev))
4993                 data = RREG32_NO_KIQ(reg);
4994         else
4995                 data = RREG32(reg);
4996
4997         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4998         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4999
5000         if (amdgpu_sriov_is_pp_one_vf(adev))
5001                 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5002         else
5003                 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5004 }
5005
5006 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5007                                         uint32_t offset,
5008                                         struct soc15_reg_rlcg *entries, int arr_size)
5009 {
5010         int i;
5011         uint32_t reg;
5012
5013         if (!entries)
5014                 return false;
5015
5016         for (i = 0; i < arr_size; i++) {
5017                 const struct soc15_reg_rlcg *entry;
5018
5019                 entry = &entries[i];
5020                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5021                 if (offset == reg)
5022                         return true;
5023         }
5024
5025         return false;
5026 }
5027
5028 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5029 {
5030         return gfx_v9_0_check_rlcg_range(adev, offset,
5031                                         (void *)rlcg_access_gc_9_0,
5032                                         ARRAY_SIZE(rlcg_access_gc_9_0));
5033 }
5034
5035 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5036         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5037         .set_safe_mode = gfx_v9_0_set_safe_mode,
5038         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5039         .init = gfx_v9_0_rlc_init,
5040         .get_csb_size = gfx_v9_0_get_csb_size,
5041         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5042         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5043         .resume = gfx_v9_0_rlc_resume,
5044         .stop = gfx_v9_0_rlc_stop,
5045         .reset = gfx_v9_0_rlc_reset,
5046         .start = gfx_v9_0_rlc_start,
5047         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5048         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5049         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5050 };
5051
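/*
 * For Raven/Renoir, GFXOFF is dropped before the power-gating knobs are
 * reprogrammed and re-acquired afterwards, presumably because register
 * access would be unreliable while the GFX block is off. VEGA12 only
 * toggles GFXOFF itself.
 */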
5052 static int gfx_v9_0_set_powergating_state(void *handle,
5053                                           enum amd_powergating_state state)
5054 {
5055         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5056         bool enable = (state == AMD_PG_STATE_GATE);
5057
5058         switch (adev->asic_type) {
5059         case CHIP_RAVEN:
5060         case CHIP_RENOIR:
5061                 if (!enable)
5062                         amdgpu_gfx_off_ctrl(adev, false);
5063
5064                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5065                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5066                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5067                 } else {
5068                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5069                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5070                 }
5071
5072                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5073                         gfx_v9_0_enable_cp_power_gating(adev, true);
5074                 else
5075                         gfx_v9_0_enable_cp_power_gating(adev, false);
5076
5077                 /* update gfx cgpg state */
5078                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5079
5080                 /* update mgcg state */
5081                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5082
5083                 if (enable)
5084                         amdgpu_gfx_off_ctrl(adev, true);
5085                 break;
5086         case CHIP_VEGA12:
5087                 amdgpu_gfx_off_ctrl(adev, enable);
5088                 break;
5089         default:
5090                 break;
5091         }
5092
5093         return 0;
5094 }
5095
5096 static int gfx_v9_0_set_clockgating_state(void *handle,
5097                                           enum amd_clockgating_state state)
5098 {
5099         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5100
5101         if (amdgpu_sriov_vf(adev))
5102                 return 0;
5103
5104         switch (adev->asic_type) {
5105         case CHIP_VEGA10:
5106         case CHIP_VEGA12:
5107         case CHIP_VEGA20:
5108         case CHIP_RAVEN:
5109         case CHIP_ARCTURUS:
5110         case CHIP_RENOIR:
5111                 gfx_v9_0_update_gfx_clock_gating(adev,
5112                                                  state == AMD_CG_STATE_GATE);
5113                 break;
5114         default:
5115                 break;
5116         }
5117         return 0;
5118 }
5119
5120 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5121 {
5122         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5123         int data;
5124
5125         if (amdgpu_sriov_vf(adev))
5126                 *flags = 0;
5127
5128         /* AMD_CG_SUPPORT_GFX_MGCG */
5129         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5130         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5131                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5132
5133         /* AMD_CG_SUPPORT_GFX_CGCG */
5134         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5135         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5136                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5137
5138         /* AMD_CG_SUPPORT_GFX_CGLS */
5139         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5140                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5141
5142         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5143         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5144         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5145                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5146
5147         /* AMD_CG_SUPPORT_GFX_CP_LS */
5148         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5149         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5150                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5151
5152         if (adev->asic_type != CHIP_ARCTURUS) {
5153                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5154                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5155                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5156                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5157
5158                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5159                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5160                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5161         }
5162 }
5163
5164 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5165 {
        return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
5167 }
5168
5169 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5170 {
5171         struct amdgpu_device *adev = ring->adev;
5172         u64 wptr;
5173
5174         /* XXX check if swapping is necessary on BE */
5175         if (ring->use_doorbell) {
5176                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5177         } else {
5178                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5179                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5180         }
5181
5182         return wptr;
5183 }
5184
5185 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5186 {
5187         struct amdgpu_device *adev = ring->adev;
5188
5189         if (ring->use_doorbell) {
5190                 /* XXX check if swapping is necessary on BE */
5191                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5192                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5193         } else {
5194                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5195                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5196         }
5197 }
5198
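/*
 * Flush HDP by writing the NBIO flush request register and using
 * WAIT_REG_MEM to poll the "done" register for the matching bit. Compute
 * rings derive their ref/mask bit from the ME/pipe pair, while the GFX
 * ring polls from the PFP engine.
 */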
5199 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5200 {
5201         struct amdgpu_device *adev = ring->adev;
5202         u32 ref_and_mask, reg_mem_engine;
5203         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5204
5205         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5206                 switch (ring->me) {
5207                 case 1:
5208                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5209                         break;
5210                 case 2:
5211                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5212                         break;
5213                 default:
5214                         return;
5215                 }
5216                 reg_mem_engine = 0;
5217         } else {
5218                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5219                 reg_mem_engine = 1; /* pfp */
5220         }
5221
5222         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5223                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5224                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5225                               ref_and_mask, ref_and_mask, 0x20);
5226 }
5227
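/*
 * IB emission: constant-engine IBs use INDIRECT_BUFFER_CONST, all others a
 * plain INDIRECT_BUFFER. The final control dword carries the IB length in
 * dwords plus the VMID in bits 31:24; under SR-IOV, preemptible IBs
 * additionally set PRE_ENB and emit DE metadata first.
 */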
5228 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5229                                         struct amdgpu_job *job,
5230                                         struct amdgpu_ib *ib,
5231                                         uint32_t flags)
5232 {
5233         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5234         u32 header, control = 0;
5235
5236         if (ib->flags & AMDGPU_IB_FLAG_CE)
5237                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5238         else
5239                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5240
5241         control |= ib->length_dw | (vmid << 24);
5242
5243         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5244                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5245
5246                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5247                         gfx_v9_0_ring_emit_de_meta(ring);
5248         }
5249
5250         amdgpu_ring_write(ring, header);
5251         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5252         amdgpu_ring_write(ring,
5253 #ifdef __BIG_ENDIAN
5254                 (2 << 0) |
5255 #endif
5256                 lower_32_bits(ib->gpu_addr));
5257         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5258         amdgpu_ring_write(ring, control);
5259 }
5260
5261 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5262                                           struct amdgpu_job *job,
5263                                           struct amdgpu_ib *ib,
5264                                           uint32_t flags)
5265 {
5266         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5267         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5268
        /* Currently there is a high likelihood of a wave ID mismatch
         * between ME and GDS, leading to a HW deadlock, because ME
         * generates different wave IDs than the GDS expects. This happens
         * randomly when at least 5 compute pipes use GDS ordered append.
         * The wave IDs generated by ME are also wrong after suspend/resume.
         * Those are probably bugs somewhere else in the kernel driver.
         *
         * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in
         * ME and GDS to 0 for this ring (me/pipe).
         */
5279         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5280                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5281                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5282                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5283         }
5284
5285         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5286         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5287         amdgpu_ring_write(ring,
5288 #ifdef __BIG_ENDIAN
5289                                 (2 << 0) |
5290 #endif
5291                                 lower_32_bits(ib->gpu_addr));
5292         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5293         amdgpu_ring_write(ring, control);
5294 }
5295
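/*
 * Fences are emitted with RELEASE_MEM: the first dword selects the cache
 * actions plus the EOP timestamp event, the second the data and interrupt
 * behaviour (DATA_SEL 1 = 32-bit seq, 2 = 64-bit seq), and the remaining
 * dwords carry the destination address and sequence value.
 */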
5296 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5297                                      u64 seq, unsigned flags)
5298 {
5299         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5300         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5301         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5302
5303         /* RELEASE_MEM - flush caches, send int */
5304         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5305         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5306                                                EOP_TC_NC_ACTION_EN) :
5307                                               (EOP_TCL1_ACTION_EN |
5308                                                EOP_TC_ACTION_EN |
5309                                                EOP_TC_WB_ACTION_EN |
5310                                                EOP_TC_MD_ACTION_EN)) |
5311                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5312                                  EVENT_INDEX(5)));
5313         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5314
        /*
         * The address must be Qword aligned for a 64-bit write and Dword
         * aligned when only the low 32 bits of data are sent (data high
         * is discarded).
         */
5319         if (write64bit)
5320                 BUG_ON(addr & 0x7);
5321         else
5322                 BUG_ON(addr & 0x3);
5323         amdgpu_ring_write(ring, lower_32_bits(addr));
5324         amdgpu_ring_write(ring, upper_32_bits(addr));
5325         amdgpu_ring_write(ring, lower_32_bits(seq));
5326         amdgpu_ring_write(ring, upper_32_bits(seq));
5327         amdgpu_ring_write(ring, 0);
5328 }
5329
5330 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5331 {
5332         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5333         uint32_t seq = ring->fence_drv.sync_seq;
5334         uint64_t addr = ring->fence_drv.gpu_addr;
5335
5336         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5337                               lower_32_bits(addr), upper_32_bits(addr),
5338                               seq, 0xffffffff, 4);
5339 }
5340
5341 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5342                                         unsigned vmid, uint64_t pd_addr)
5343 {
5344         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5345
5346         /* compute doesn't have PFP */
5347         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5348                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5349                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5350                 amdgpu_ring_write(ring, 0x0);
5351         }
5352 }
5353
5354 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5355 {
5356         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5357 }
5358
5359 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5360 {
5361         u64 wptr;
5362
5363         /* XXX check if swapping is necessary on BE */
5364         if (ring->use_doorbell)
5365                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5366         else
5367                 BUG();
5368         return wptr;
5369 }
5370
5371 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5372 {
5373         struct amdgpu_device *adev = ring->adev;
5374
5375         /* XXX check if swapping is necessary on BE */
5376         if (ring->use_doorbell) {
5377                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5378                 WDOORBELL64(ring->doorbell_index, ring->wptr);
        } else {
                BUG(); /* only the doorbell method is supported on gfx9 for now */
        }
5382 }
5383
5384 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5385                                          u64 seq, unsigned int flags)
5386 {
5387         struct amdgpu_device *adev = ring->adev;
5388
        /* we only allocate 32 bits for each seq writeback address */
5390         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5391
5392         /* write fence seq to the "addr" */
5393         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5394         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5395                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5396         amdgpu_ring_write(ring, lower_32_bits(addr));
5397         amdgpu_ring_write(ring, upper_32_bits(addr));
5398         amdgpu_ring_write(ring, lower_32_bits(seq));
5399
5400         if (flags & AMDGPU_FENCE_FLAG_INT) {
5401                 /* set register to trigger INT */
5402                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5403                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5404                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5405                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5406                 amdgpu_ring_write(ring, 0);
5407                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5408         }
5409 }
5410
5411 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5412 {
5413         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5414         amdgpu_ring_write(ring, 0);
5415 }
5416
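/*
 * Under SR-IOV, a zeroed CE metadata payload is written into the
 * per-context CSA through WRITE_DATA; the DE variant below additionally
 * points the payload at a GDS backup area placed 4KB past the CSA base.
 */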
5417 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5418 {
5419         struct v9_ce_ib_state ce_payload = {0};
5420         uint64_t csa_addr;
5421         int cnt;
5422
5423         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5424         csa_addr = amdgpu_csa_vaddr(ring->adev);
5425
5426         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5427         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5428                                  WRITE_DATA_DST_SEL(8) |
5429                                  WR_CONFIRM) |
5430                                  WRITE_DATA_CACHE_POLICY(0));
5431         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5432         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5433         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5434 }
5435
5436 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5437 {
5438         struct v9_de_ib_state de_payload = {0};
5439         uint64_t csa_addr, gds_addr;
5440         int cnt;
5441
5442         csa_addr = amdgpu_csa_vaddr(ring->adev);
5443         gds_addr = csa_addr + 4096;
5444         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5445         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5446
5447         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5448         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5449         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5450                                  WRITE_DATA_DST_SEL(8) |
5451                                  WR_CONFIRM) |
5452                                  WRITE_DATA_CACHE_POLICY(0));
5453         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5454         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5455         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5456 }
5457
5458 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5459                                    bool secure)
5460 {
5461         uint32_t v = secure ? FRAME_TMZ : 0;
5462
5463         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5464         amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5465 }
5466
5467 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5468 {
5469         uint32_t dw2 = 0;
5470
5471         if (amdgpu_sriov_vf(ring->adev))
5472                 gfx_v9_0_ring_emit_ce_meta(ring);
5473
        dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
5475         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5476                 /* set load_global_config & load_global_uconfig */
5477                 dw2 |= 0x8001;
5478                 /* set load_cs_sh_regs */
5479                 dw2 |= 0x01000000;
5480                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5481                 dw2 |= 0x10002;
5482
5483                 /* set load_ce_ram if preamble presented */
5484                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5485                         dw2 |= 0x10000000;
5486         } else {
                /* still load_ce_ram if this is the first time the preamble
                 * is presented, even though no context switch happens.
                 */
5490                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5491                         dw2 |= 0x10000000;
5492         }
5493
5494         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5495         amdgpu_ring_write(ring, dw2);
5496         amdgpu_ring_write(ring, 0);
5497 }
5498
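/*
 * Conditional execution: COND_EXEC is emitted with a dummy dword count
 * (0x55aa55aa) and the ring offset of that dword is returned. After the
 * frame is complete, gfx_v9_0_ring_emit_patch_cond_exec() rewrites it with
 * the real number of dwords to skip when *cond_exe_gpu_addr == 0,
 * accounting for ring-buffer wrap-around.
 */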
5499 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5500 {
5501         unsigned ret;
5502         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5503         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5504         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5505         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5506         ret = ring->wptr & ring->buf_mask;
5507         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5508         return ret;
5509 }
5510
5511 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5512 {
5513         unsigned cur;
5514         BUG_ON(offset > ring->buf_mask);
5515         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5516
5517         cur = (ring->wptr & ring->buf_mask) - 1;
5518         if (likely(cur > offset))
5519                 ring->ring[offset] = cur - offset;
5520         else
5521                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5522 }
5523
5524 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5525                                     uint32_t reg_val_offs)
5526 {
5527         struct amdgpu_device *adev = ring->adev;
5528
5529         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5530         amdgpu_ring_write(ring, 0 |     /* src: register*/
5531                                 (5 << 8) |      /* dst: memory */
5532                                 (1 << 20));     /* write confirm */
5533         amdgpu_ring_write(ring, reg);
5534         amdgpu_ring_write(ring, 0);
5535         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5536                                 reg_val_offs * 4));
5537         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5538                                 reg_val_offs * 4));
5539 }
5540
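/*
 * Register writes use WRITE_DATA; the control dword differs per ring
 * type: GFX selects engine 1 with write confirm, KIQ sets bit 16 (no
 * address increment), and compute rings only request write confirm.
 */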
5541 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5542                                     uint32_t val)
5543 {
5544         uint32_t cmd = 0;
5545
5546         switch (ring->funcs->type) {
5547         case AMDGPU_RING_TYPE_GFX:
5548                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5549                 break;
5550         case AMDGPU_RING_TYPE_KIQ:
5551                 cmd = (1 << 16); /* no inc addr */
5552                 break;
5553         default:
5554                 cmd = WR_CONFIRM;
5555                 break;
5556         }
5557         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5558         amdgpu_ring_write(ring, cmd);
5559         amdgpu_ring_write(ring, reg);
5560         amdgpu_ring_write(ring, 0);
5561         amdgpu_ring_write(ring, val);
5562 }
5563
5564 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5565                                         uint32_t val, uint32_t mask)
5566 {
5567         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5568 }
5569
5570 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5571                                                   uint32_t reg0, uint32_t reg1,
5572                                                   uint32_t ref, uint32_t mask)
5573 {
5574         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5575         struct amdgpu_device *adev = ring->adev;
5576         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5577                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5578
5579         if (fw_version_ok)
5580                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5581                                       ref, mask, 0x20);
5582         else
5583                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5584                                                            ref, mask);
5585 }
5586
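/*
 * Soft recovery: issue an SQ_CMD (CMD=0x03, MODE=0x01) restricted via
 * CHECK_VMID to the guilty VMID, presumably killing the hung waves so
 * that the ring can make forward progress again.
 */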
5587 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5588 {
5589         struct amdgpu_device *adev = ring->adev;
5590         uint32_t value = 0;
5591
5592         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5593         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5594         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5595         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5596         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5597 }
5598
5599 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5600                                                  enum amdgpu_interrupt_state state)
5601 {
5602         switch (state) {
5603         case AMDGPU_IRQ_STATE_DISABLE:
5604         case AMDGPU_IRQ_STATE_ENABLE:
5605                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5606                                TIME_STAMP_INT_ENABLE,
5607                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5608                 break;
5609         default:
5610                 break;
5611         }
5612 }
5613
5614 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5615                                                      int me, int pipe,
5616                                                      enum amdgpu_interrupt_state state)
5617 {
5618         u32 mec_int_cntl, mec_int_cntl_reg;
5619
5620         /*
5621          * amdgpu controls only the first MEC. That's why this function only
5622          * handles the setting of interrupts for this specific MEC. All other
5623          * pipes' interrupts are set by amdkfd.
5624          */
5625
5626         if (me == 1) {
5627                 switch (pipe) {
5628                 case 0:
5629                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5630                         break;
5631                 case 1:
5632                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5633                         break;
5634                 case 2:
5635                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5636                         break;
5637                 case 3:
5638                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5639                         break;
5640                 default:
5641                         DRM_DEBUG("invalid pipe %d\n", pipe);
5642                         return;
5643                 }
5644         } else {
5645                 DRM_DEBUG("invalid me %d\n", me);
5646                 return;
5647         }
5648
5649         switch (state) {
5650         case AMDGPU_IRQ_STATE_DISABLE:
5651                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5652                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5653                                              TIME_STAMP_INT_ENABLE, 0);
5654                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5655                 break;
5656         case AMDGPU_IRQ_STATE_ENABLE:
5657                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5658                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5659                                              TIME_STAMP_INT_ENABLE, 1);
5660                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5661                 break;
5662         default:
5663                 break;
5664         }
5665 }
5666
5667 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5668                                              struct amdgpu_irq_src *source,
5669                                              unsigned type,
5670                                              enum amdgpu_interrupt_state state)
5671 {
5672         switch (state) {
5673         case AMDGPU_IRQ_STATE_DISABLE:
5674         case AMDGPU_IRQ_STATE_ENABLE:
5675                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5676                                PRIV_REG_INT_ENABLE,
5677                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5678                 break;
5679         default:
5680                 break;
5681         }
5682
5683         return 0;
5684 }
5685
5686 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5687                                               struct amdgpu_irq_src *source,
5688                                               unsigned type,
5689                                               enum amdgpu_interrupt_state state)
5690 {
5691         switch (state) {
5692         case AMDGPU_IRQ_STATE_DISABLE:
5693         case AMDGPU_IRQ_STATE_ENABLE:
5694                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5695                                PRIV_INSTR_INT_ENABLE,
5696                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5697                 break;
5698         default:
5699                 break;
5700         }
5701
5702         return 0;
5703 }
5704
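/*
 * Token-pasting helpers that toggle the CP_ECC_ERROR_INT_ENABLE field in
 * the per-pipe interrupt control register of a given (ME, pipe) pair.
 */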
5705 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5706         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5707                         CP_ECC_ERROR_INT_ENABLE, 1)
5708
5709 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5710         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5711                         CP_ECC_ERROR_INT_ENABLE, 0)
5712
5713 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5714                                               struct amdgpu_irq_src *source,
5715                                               unsigned type,
5716                                               enum amdgpu_interrupt_state state)
5717 {
5718         switch (state) {
5719         case AMDGPU_IRQ_STATE_DISABLE:
5720                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5721                                 CP_ECC_ERROR_INT_ENABLE, 0);
5722                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5723                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5724                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5725                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5726                 break;
5727
5728         case AMDGPU_IRQ_STATE_ENABLE:
5729                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5730                                 CP_ECC_ERROR_INT_ENABLE, 1);
5731                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5732                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5733                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5734                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5735                 break;
5736         default:
5737                 break;
5738         }
5739
5740         return 0;
5741 }
5742
5744 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5745                                             struct amdgpu_irq_src *src,
5746                                             unsigned type,
5747                                             enum amdgpu_interrupt_state state)
5748 {
5749         switch (type) {
5750         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5751                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5752                 break;
5753         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5754                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5755                 break;
5756         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5757                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5758                 break;
5759         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5760                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5761                 break;
5762         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5763                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5764                 break;
5765         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5766                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5767                 break;
5768         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5769                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5770                 break;
5771         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5772                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5773                 break;
5774         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5775                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5776                 break;
5777         default:
5778                 break;
5779         }
5780         return 0;
5781 }
5782
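/*
 * IH cookie layout decoded below: ring_id[1:0] = pipe, ring_id[3:2] = ME,
 * ring_id[6:4] = queue; me_id 0 is the GFX ring, 1 and 2 are the compute
 * MECs.
 */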
5783 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5784                             struct amdgpu_irq_src *source,
5785                             struct amdgpu_iv_entry *entry)
5786 {
5787         int i;
5788         u8 me_id, pipe_id, queue_id;
5789         struct amdgpu_ring *ring;
5790
5791         DRM_DEBUG("IH: CP EOP\n");
5792         me_id = (entry->ring_id & 0x0c) >> 2;
5793         pipe_id = (entry->ring_id & 0x03) >> 0;
5794         queue_id = (entry->ring_id & 0x70) >> 4;
5795
5796         switch (me_id) {
5797         case 0:
5798                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5799                 break;
5800         case 1:
5801         case 2:
5802                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5803                         ring = &adev->gfx.compute_ring[i];
                        /* Per-queue interrupt is supported for MEC starting from VI.
                         * The interrupt can only be enabled/disabled per pipe instead
                         * of per queue.
                         */
5807                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5808                                 amdgpu_fence_process(ring);
5809                 }
5810                 break;
5811         }
5812         return 0;
5813 }
5814
5815 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5816                            struct amdgpu_iv_entry *entry)
5817 {
5818         u8 me_id, pipe_id, queue_id;
5819         struct amdgpu_ring *ring;
5820         int i;
5821
5822         me_id = (entry->ring_id & 0x0c) >> 2;
5823         pipe_id = (entry->ring_id & 0x03) >> 0;
5824         queue_id = (entry->ring_id & 0x70) >> 4;
5825
5826         switch (me_id) {
5827         case 0:
5828                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5829                 break;
5830         case 1:
5831         case 2:
5832                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5833                         ring = &adev->gfx.compute_ring[i];
5834                         if (ring->me == me_id && ring->pipe == pipe_id &&
5835                             ring->queue == queue_id)
5836                                 drm_sched_fault(&ring->sched);
5837                 }
5838                 break;
5839         }
5840 }
5841
5842 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5843                                  struct amdgpu_irq_src *source,
5844                                  struct amdgpu_iv_entry *entry)
5845 {
5846         DRM_ERROR("Illegal register access in command stream\n");
5847         gfx_v9_0_fault(adev, entry);
5848         return 0;
5849 }
5850
5851 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5852                                   struct amdgpu_irq_src *source,
5853                                   struct amdgpu_iv_entry *entry)
5854 {
5855         DRM_ERROR("Illegal instruction in command stream\n");
5856         gfx_v9_0_fault(adev, entry);
5857         return 0;
5858 }
5859
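/*
 * EDC/RAS counter map: each entry names an on-chip memory, the EDC count
 * register tracking it, and the SEC (single-error corrected) and DED
 * (double-error detected) field masks; entries with a zero DED pair only
 * expose a single SED-style counter.
 */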
5861 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5862         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5863           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5864           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5865         },
5866         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5867           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5868           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5869         },
5870         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5871           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5872           0, 0
5873         },
5874         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5875           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5876           0, 0
5877         },
5878         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5879           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5880           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5881         },
5882         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5883           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5884           0, 0
5885         },
5886         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5887           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5888           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5889         },
5890         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5891           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5892           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5893         },
5894         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5895           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5896           0, 0
5897         },
5898         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5899           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5900           0, 0
5901         },
5902         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5903           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5904           0, 0
5905         },
5906         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5907           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5908           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5909         },
5910         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5911           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5912           0, 0
5913         },
5914         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5915           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5916           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5917         },
5918         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5919           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5920           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5921           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5922         },
5923         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5924           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5925           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5926           0, 0
5927         },
5928         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5929           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5930           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5931           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5932         },
5933         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5934           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5935           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5936           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5937         },
5938         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5939           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5940           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5941           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5942         },
5943         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5944           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5945           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5946           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5947         },
5948         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5949           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5950           0, 0
5951         },
5952         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5953           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5954           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5955         },
5956         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5957           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5958           0, 0
5959         },
5960         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5961           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5962           0, 0
5963         },
5964         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5965           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5966           0, 0
5967         },
5968         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5969           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5970           0, 0
5971         },
5972         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5973           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5974           0, 0
5975         },
5976         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5977           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5978           0, 0
5979         },
5980         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5981           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5982           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5983         },
5984         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5985           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5986           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5987         },
5988         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5989           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5990           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5991         },
5992         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5993           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5994           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5995         },
5996         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5997           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5998           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5999         },
6000         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6001           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6002           0, 0
6003         },
6004         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6005           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6006           0, 0
6007         },
6008         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6009           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6010           0, 0
6011         },
6012         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6013           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6014           0, 0
6015         },
6016         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6017           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6018           0, 0
6019         },
6020         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6021           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6022           0, 0
6023         },
6024         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6025           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6026           0, 0
6027         },
6028         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6029           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6030           0, 0
6031         },
6032         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6033           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6034           0, 0
6035         },
6036         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6037           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6038           0, 0
6039         },
6040         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6041           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6042           0, 0
6043         },
6044         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6045           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6046           0, 0
6047         },
6048         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6049           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6050           0, 0
6051         },
6052         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6053           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6054           0, 0
6055         },
6056         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6057           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6058           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6059         },
6060         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6061           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6062           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6063         },
6064         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6065           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6066           0, 0
6067         },
6068         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6069           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6070           0, 0
6071         },
6072         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6073           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6074           0, 0
6075         },
6076         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6077           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6078           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6079         },
6080         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6081           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6082           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6083         },
6084         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6085           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6086           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6087         },
6088         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6089           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6090           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6091         },
6092         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6093           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6094           0, 0
6095         },
6096         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6097           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6098           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6099         },
6100         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6101           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6102           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6103         },
6104         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6105           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6106           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6107         },
6108         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6109           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6110           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6111         },
6112         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6113           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6114           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6115         },
6116         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6117           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6118           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6119         },
6120         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6121           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6122           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6123         },
6124         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6125           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6126           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6127         },
6128         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6129           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6130           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6131         },
6132         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6133           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6134           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6135         },
6136         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6137           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6138           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6139         },
6140         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6141           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6142           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6143         },
6144         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6145           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6146           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6147         },
6148         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6149           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6150           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6151         },
6152         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6153           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6154           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6155         },
6156         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6157           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6158           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6159         },
6160         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6161           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6162           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6163         },
6164         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6165           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6166           0, 0
6167         },
6168         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6169           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6170           0, 0
6171         },
6172         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6173           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6174           0, 0
6175         },
6176         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6177           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6178           0, 0
6179         },
6180         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6181           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6182           0, 0
6183         },
6184         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6185           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6186           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6187         },
6188         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6189           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6190           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6191         },
6192         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6193           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6194           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6195         },
6196         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6197           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6198           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6199         },
6200         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6201           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6202           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6203         },
6204         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6205           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6206           0, 0
6207         },
6208         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6209           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6210           0, 0
6211         },
6212         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6213           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6214           0, 0
6215         },
6216         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6217           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6218           0, 0
6219         },
6220         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6221           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6222           0, 0
6223         },
6224         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6225           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6226           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6227         },
6228         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6229           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6230           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6231         },
6232         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6233           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6234           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6235         },
6236         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6237           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6238           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6239         },
6240         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6241           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6242           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6243         },
6244         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6245           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6246           0, 0
6247         },
6248         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6249           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6250           0, 0
6251         },
6252         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6253           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6254           0, 0
6255         },
6256         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6257           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6258           0, 0
6259         },
6260         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6261           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6262           0, 0
6263         },
6264         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6265           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6266           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6267         },
6268         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6269           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6270           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6271         },
6272         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6273           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6274           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6275         },
6276         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6277           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6278           0, 0
6279         },
6280         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6281           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6282           0, 0
6283         },
6284         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6285           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6286           0, 0
6287         },
6288         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6289           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6290           0, 0
6291         },
6292         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6293           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6294           0, 0
6295         },
6296         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6297           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6298           0, 0
6299         }
6300 };
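
/*
 * A note on the field names above: SEC counts single-bit errors that were
 * corrected, DED counts double-bit errors that were detected but not
 * corrected, and SED counts single-bit errors that were only detected.
 * The query code below folds SEC and SED into the correctable-error
 * totals (ce_count) and DED into the uncorrectable totals (ue_count).
 */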
6301
6302 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6303                                      void *inject_if)
6304 {
6305         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6306         int ret;
6307         struct ta_ras_trigger_error_input block_info = { 0 };
6308
6309         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6310                 return -EINVAL;
6311
6312         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6313                 return -EINVAL;
6314
6315         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6316                 return -EPERM;
6317
6318         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6319               info->head.type)) {
6320                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6321                         ras_gfx_subblocks[info->head.sub_block_index].name,
6322                         info->head.type);
6323                 return -EPERM;
6324         }
6325
6326         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6327               info->head.type)) {
6328                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6329                         ras_gfx_subblocks[info->head.sub_block_index].name,
6330                         info->head.type);
6331                 return -EPERM;
6332         }
6333
6334         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6335         block_info.sub_block_index =
6336                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6337         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6338         block_info.address = info->address;
6339         block_info.value = info->value;
6340
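        /*
         * grbm_idx_mutex serializes this against other GRBM_GFX_INDEX
         * users while the PSP RAS TA performs the actual injection.
         */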
6341         mutex_lock(&adev->grbm_idx_mutex);
6342         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6343         mutex_unlock(&adev->grbm_idx_mutex);
6344
6345         return ret;
6346 }
6347
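/*
 * Instance names for the banked UTC memories queried below; array order
 * matches the hardware instance index written to the corresponding
 * *_INDEX register.
 */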
6348 static const char *vml2_mems[] = {
6349         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6350         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6351         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6352         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6353         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6354         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6355         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6356         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6357         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6358         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6359         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6360         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6361         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6362         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6363         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6364         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6365 };
6366
6367 static const char *vml2_walker_mems[] = {
6368         "UTC_VML2_CACHE_PDE0_MEM0",
6369         "UTC_VML2_CACHE_PDE0_MEM1",
6370         "UTC_VML2_CACHE_PDE1_MEM0",
6371         "UTC_VML2_CACHE_PDE1_MEM1",
6372         "UTC_VML2_CACHE_PDE2_MEM0",
6373         "UTC_VML2_CACHE_PDE2_MEM1",
6374         "UTC_VML2_RDIF_LOG_FIFO",
6375 };
6376
6377 static const char *atc_l2_cache_2m_mems[] = {
6378         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6379         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6380         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6381         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6382 };
6383
6384 static const char *atc_l2_cache_4k_mems[] = {
6385         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6386         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6387         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6388         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6389         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6390         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6391         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6392         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6393         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6394         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6395         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6396         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6397         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6398         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6399         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6400         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6401         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6402         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6403         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6404         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6405         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6406         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6407         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6408         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6409         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6410         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6411         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6412         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6413         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6414         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6415         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6416         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6417 };
6418
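/*
 * The UTC (VML2 / ATC L2) EDC counters are banked: writing an instance
 * number to an *_INDEX register selects which memory the matching *_CNT
 * register reports on. Index 255 with a zeroed count resets the state;
 * each instance is then selected and read back in turn.
 */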
6419 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6420                                          struct ras_err_data *err_data)
6421 {
6422         uint32_t i, data;
6423         uint32_t sec_count, ded_count;
6424
6425         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6426         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6427         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6428         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6429         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6430         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6431         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6432         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6433
6434         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6435                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6436                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6437
6438                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6439                 if (sec_count) {
6440                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6441                                 "SEC %d\n", i, vml2_mems[i], sec_count);
6442                         err_data->ce_count += sec_count;
6443                 }
6444
6445                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6446                 if (ded_count) {
6447                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6448                                 "DED %d\n", i, vml2_mems[i], ded_count);
6449                         err_data->ue_count += ded_count;
6450                 }
6451         }
6452
6453         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6454                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6455                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6456
6457                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6458                                                 SEC_COUNT);
6459                 if (sec_count) {
6460                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6461                                 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6462                         err_data->ce_count += sec_count;
6463                 }
6464
6465                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6466                                                 DED_COUNT);
6467                 if (ded_count) {
6468                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6469                                 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6470                         err_data->ue_count += ded_count;
6471                 }
6472         }
6473
6474         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6475                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6476                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6477
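                /* SEC count is in bits [14:13] of the 2M EDC_CNT value */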
6478                 sec_count = (data & 0x00006000L) >> 0xd;
6479                 if (sec_count) {
6480                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6481                                 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6482                                 sec_count);
6483                         err_data->ce_count += sec_count;
6484                 }
6485         }
6486
6487         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6488                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6489                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6490
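                /* SEC in bits [14:13], DED in bits [16:15] of the 4K EDC_CNT */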
6491                 sec_count = (data & 0x00006000L) >> 0xd;
6492                 if (sec_count) {
6493                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6494                                 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6495                                 sec_count);
6496                         err_data->ce_count += sec_count;
6497                 }
6498
6499                 ded_count = (data & 0x00018000L) >> 0xf;
6500                 if (ded_count) {
6501                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6502                                 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6503                                 ded_count);
6504                         err_data->ue_count += ded_count;
6505                 }
6506         }
6507
6508         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6509         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6510         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6511         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6512
6513         return 0;
6514 }
6515
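/*
 * Decode one raw EDC counter value: walk gfx_v9_0_ras_fields for every
 * field that lives in the given register and accumulate its SEC and DED
 * counts using the stored mask/shift pairs.
 */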
6516 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6517         const struct soc15_reg_entry *reg,
6518         uint32_t se_id, uint32_t inst_id, uint32_t value,
6519         uint32_t *sec_count, uint32_t *ded_count)
6520 {
6521         uint32_t i;
6522         uint32_t sec_cnt, ded_cnt;
6523
6524         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6525                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6526                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6527                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6528                         continue;
6529
6530                 sec_cnt = (value &
6531                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6532                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6533                 if (sec_cnt) {
6534                         dev_info(adev->dev, "GFX SubBlock %s, "
6535                                 "Instance[%d][%d], SEC %d\n",
6536                                 gfx_v9_0_ras_fields[i].name,
6537                                 se_id, inst_id,
6538                                 sec_cnt);
6539                         *sec_count += sec_cnt;
6540                 }
6541
6542                 ded_cnt = (value &
6543                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6544                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6545                 if (ded_cnt) {
6546                         dev_info(adev->dev, "GFX SubBlock %s, "
6547                                 "Instance[%d][%d], DED %d\n",
6548                                 gfx_v9_0_ras_fields[i].name,
6549                                 se_id, inst_id,
6550                                 ded_cnt);
6551                         *ded_count += ded_cnt;
6552                 }
6553         }
6554
6555         return 0;
6556 }
6557
6558 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6559 {
6560         int i, j, k;
6561
6562         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6563                 return;
6564
6565         /* read back registers to clear the counters */
6566         mutex_lock(&adev->grbm_idx_mutex);
6567         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6568                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6569                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6570                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6571                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6572                         }
6573                 }
6574         }
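        /* restore broadcast mode: SE/SH/instance broadcast bits set */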
6575         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6576         mutex_unlock(&adev->grbm_idx_mutex);
6577
6578         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6579         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6580         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6581         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6582         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6583         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6584         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6585         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6586
6587         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6588                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6589                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6590         }
6591
6592         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6593                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6594                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6595         }
6596
6597         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6598                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6599                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6600         }
6601
6602         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6603                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6604                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6605         }
6606
6607         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6608         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6609         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6610         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6611 }
6612
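/*
 * Select each SE/instance in turn, read every EDC counter register (the
 * read also clears it), decode the raw value through
 * gfx_v9_0_ras_error_count(), then add in the separately indexed UTC
 * counters.
 */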
6613 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6614                                           void *ras_error_status)
6615 {
6616         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6617         uint32_t sec_count = 0, ded_count = 0;
6618         uint32_t i, j, k;
6619         uint32_t reg_value;
6620
6621         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6622                 return -EINVAL;
6623
6624         err_data->ue_count = 0;
6625         err_data->ce_count = 0;
6626
6627         mutex_lock(&adev->grbm_idx_mutex);
6628
6629         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6630                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6631                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6632                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6633                                 reg_value =
6634                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6635                                 if (reg_value)
6636                                         gfx_v9_0_ras_error_count(adev,
6637                                                 &gfx_v9_0_edc_counter_regs[i],
6638                                                 j, k, reg_value,
6639                                                 &sec_count, &ded_count);
6640                         }
6641                 }
6642         }
6643
6644         err_data->ce_count += sec_count;
6645         err_data->ue_count += ded_count;
6646
6647         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6648         mutex_unlock(&adev->grbm_idx_mutex);
6649
6650         gfx_v9_0_query_utc_edc_status(adev, err_data);
6651
6652         return 0;
6653 }
6654
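/*
 * Emit a full cache flush/invalidate: shader instruction and scalar
 * (K$) caches plus TCL1 and TC (L2) with writeback, over the entire
 * address range (COHER_SIZE all ones).
 */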
6655 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6656 {
6657         const unsigned int cp_coher_cntl =
6658                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6659                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6660                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6661                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6662                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6663
6664         /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
6665         amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6666         amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6667         amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6668         amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6669         amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6670         amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6671         amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6672 }
6673
6674 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6675         .name = "gfx_v9_0",
6676         .early_init = gfx_v9_0_early_init,
6677         .late_init = gfx_v9_0_late_init,
6678         .sw_init = gfx_v9_0_sw_init,
6679         .sw_fini = gfx_v9_0_sw_fini,
6680         .hw_init = gfx_v9_0_hw_init,
6681         .hw_fini = gfx_v9_0_hw_fini,
6682         .suspend = gfx_v9_0_suspend,
6683         .resume = gfx_v9_0_resume,
6684         .is_idle = gfx_v9_0_is_idle,
6685         .wait_for_idle = gfx_v9_0_wait_for_idle,
6686         .soft_reset = gfx_v9_0_soft_reset,
6687         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6688         .set_powergating_state = gfx_v9_0_set_powergating_state,
6689         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6690 };
6691
6692 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6693         .type = AMDGPU_RING_TYPE_GFX,
6694         .align_mask = 0xff,
6695         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6696         .support_64bit_ptrs = true,
6697         .vmhub = AMDGPU_GFXHUB_0,
6698         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6699         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6700         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6701         .emit_frame_size = /* 242 maximum in total if 16 IBs */
6702                 5 +  /* COND_EXEC */
6703                 7 +  /* PIPELINE_SYNC */
6704                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6705                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6706                 2 + /* VM_FLUSH */
6707                 8 +  /* FENCE for VM_FLUSH */
6708                 20 + /* GDS switch */
6709                 4 + /* double SWITCH_BUFFER,
6710                        the first COND_EXEC jumps to the place just
6711                        prior to this double SWITCH_BUFFER */
6712                 5 + /* COND_EXEC */
6713                 7 + /* HDP_flush */
6714                 4 + /* VGT_flush */
6715                 14 + /* CE_META */
6716                 31 + /* DE_META */
6717                 3 + /* CNTX_CTRL */
6718                 5 + /* HDP_INVL */
6719                 8 + 8 + /* FENCE x2 */
6720                 2 + /* SWITCH_BUFFER */
6721                 7, /* gfx_v9_0_emit_mem_sync */
6722         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6723         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6724         .emit_fence = gfx_v9_0_ring_emit_fence,
6725         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6726         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6727         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6728         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6729         .test_ring = gfx_v9_0_ring_test_ring,
6730         .test_ib = gfx_v9_0_ring_test_ib,
6731         .insert_nop = amdgpu_ring_insert_nop,
6732         .pad_ib = amdgpu_ring_generic_pad_ib,
6733         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6734         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6735         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6736         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6737         .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6738         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6739         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6740         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6741         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6742         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6743 };
6744
6745 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6746         .type = AMDGPU_RING_TYPE_COMPUTE,
6747         .align_mask = 0xff,
6748         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6749         .support_64bit_ptrs = true,
6750         .vmhub = AMDGPU_GFXHUB_0,
6751         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6752         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6753         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6754         .emit_frame_size =
6755                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6756                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6757                 5 + /* hdp invalidate */
6758                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6759                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6760                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6761                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6762                 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6763                 7, /* gfx_v9_0_emit_mem_sync */
6764         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6765         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6766         .emit_fence = gfx_v9_0_ring_emit_fence,
6767         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6768         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6769         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6770         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6771         .test_ring = gfx_v9_0_ring_test_ring,
6772         .test_ib = gfx_v9_0_ring_test_ib,
6773         .insert_nop = amdgpu_ring_insert_nop,
6774         .pad_ib = amdgpu_ring_generic_pad_ib,
6775         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6776         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6777         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6778         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6779 };
6780
6781 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6782         .type = AMDGPU_RING_TYPE_KIQ,
6783         .align_mask = 0xff,
6784         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6785         .support_64bit_ptrs = true,
6786         .vmhub = AMDGPU_GFXHUB_0,
6787         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6788         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6789         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6790         .emit_frame_size =
6791                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6792                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6793                 5 + /* hdp invalidate */
6794                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6795                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6796                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6797                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6798                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6799         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6800         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6801         .test_ring = gfx_v9_0_ring_test_ring,
6802         .insert_nop = amdgpu_ring_insert_nop,
6803         .pad_ib = amdgpu_ring_generic_pad_ib,
6804         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6805         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6806         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6807         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6808 };
6809
6810 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6811 {
6812         int i;
6813
6814         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6815
6816         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6817                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6818
6819         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6820                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6821 }
6822
6823 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6824         .set = gfx_v9_0_set_eop_interrupt_state,
6825         .process = gfx_v9_0_eop_irq,
6826 };
6827
6828 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6829         .set = gfx_v9_0_set_priv_reg_fault_state,
6830         .process = gfx_v9_0_priv_reg_irq,
6831 };
6832
6833 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6834         .set = gfx_v9_0_set_priv_inst_fault_state,
6835         .process = gfx_v9_0_priv_inst_irq,
6836 };
6837
6838 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6839         .set = gfx_v9_0_set_cp_ecc_error_state,
6840         .process = amdgpu_gfx_cp_ecc_error_irq,
6841 };
6842
6844 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6845 {
6846         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6847         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6848
6849         adev->gfx.priv_reg_irq.num_types = 1;
6850         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6851
6852         adev->gfx.priv_inst_irq.num_types = 1;
6853         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6854
6855         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6856         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6857 }
6858
6859 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6860 {
6861         switch (adev->asic_type) {
6862         case CHIP_VEGA10:
6863         case CHIP_VEGA12:
6864         case CHIP_VEGA20:
6865         case CHIP_RAVEN:
6866         case CHIP_ARCTURUS:
6867         case CHIP_RENOIR:
6868                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6869                 break;
6870         default:
6871                 break;
6872         }
6873 }
6874
6875 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6876 {
6877         /* init asic gds info */
6878         switch (adev->asic_type) {
6879         case CHIP_VEGA10:
6880         case CHIP_VEGA12:
6881         case CHIP_VEGA20:
6882                 adev->gds.gds_size = 0x10000;
6883                 break;
6884         case CHIP_RAVEN:
6885         case CHIP_ARCTURUS:
6886                 adev->gds.gds_size = 0x1000;
6887                 break;
6888         default:
6889                 adev->gds.gds_size = 0x10000;
6890                 break;
6891         }
6892
6893         switch (adev->asic_type) {
6894         case CHIP_VEGA10:
6895         case CHIP_VEGA20:
6896                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6897                 break;
6898         case CHIP_VEGA12:
6899                 adev->gds.gds_compute_max_wave_id = 0x27f;
6900                 break;
6901         case CHIP_RAVEN:
6902                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
6903                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6904                 else
6905                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6906                 break;
6907         case CHIP_ARCTURUS:
6908                 adev->gds.gds_compute_max_wave_id = 0xfff;
6909                 break;
6910         default:
6911                 /* this really depends on the chip */
6912                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6913                 break;
6914         }
6915
6916         adev->gds.gws_size = 64;
6917         adev->gds.oa_size = 16;
6918 }
6919
6920 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6921                                                  u32 bitmap)
6922 {
6923         u32 data;
6924
6925         if (!bitmap)
6926                 return;
6927
6928         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6929         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6930
6931         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6932 }
6933
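/*
 * Both registers report *inactive* CUs: CC_GC_SHADER_ARRAY_CONFIG holds
 * the fused-off CUs and GC_USER_SHADER_ARRAY_CONFIG the user-disabled
 * ones. OR them together, invert, and mask down to max_cu_per_sh to get
 * the active-CU bitmap.
 */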
6934 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6935 {
6936         u32 data, mask;
6937
6938         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6939         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6940
6941         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6942         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6943
6944         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6945
6946         return (~data) & mask;
6947 }
6948
6949 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6950                                  struct amdgpu_cu_info *cu_info)
6951 {
6952         int i, j, k, counter, active_cu_number = 0;
6953         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6954         unsigned disable_masks[4 * 4];
6955
6956         if (!adev || !cu_info)
6957                 return -EINVAL;
6958
6959         /*
6960          * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
6961          */
6962         if (adev->gfx.config.max_shader_engines *
6963                 adev->gfx.config.max_sh_per_se > 16)
6964                 return -EINVAL;
6965
6966         amdgpu_gfx_parse_disable_cu(disable_masks,
6967                                     adev->gfx.config.max_shader_engines,
6968                                     adev->gfx.config.max_sh_per_se);
6969
6970         mutex_lock(&adev->grbm_idx_mutex);
6971         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6972                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6973                         mask = 1;
6974                         ao_bitmap = 0;
6975                         counter = 0;
6976                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6977                         gfx_v9_0_set_user_cu_inactive_bitmap(
6978                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6979                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6980
6981                         /*
6982                          * The bitmap (and ao_cu_bitmap) in the cu_info structure
6983                          * is a 4x4 array, which suits Vega ASICs with their
6984                          * 4*2 SE/SH layout. Arcturus, however, uses an 8*1
6985                          * SE/SH layout. To minimize the impact, we fold the
6986                          * extra SEs into the existing array via
6987                          * bitmap[i % 4][j + i / 4]:
6988                          *    SE4,SH0 --> bitmap[0][1]
6989                          *    SE5,SH0 --> bitmap[1][1]
6990                          *    SE6,SH0 --> bitmap[2][1]
6991                          *    SE7,SH0 --> bitmap[3][1]
6992                          */
6993                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6994
6995                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6996                                 if (bitmap & mask) {
6997                                         if (counter < adev->gfx.config.max_cu_per_sh)
6998                                                 ao_bitmap |= mask;
6999                                         counter++;
7000                                 }
7001                                 mask <<= 1;
7002                         }
7003                         active_cu_number += counter;
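                        /* ao_cu_mask has room for 2 SEs x 2 SHs, 8 bits each */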
7004                         if (i < 2 && j < 2)
7005                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7006                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7007                 }
7008         }
7009         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7010         mutex_unlock(&adev->grbm_idx_mutex);
7011
7012         cu_info->number = active_cu_number;
7013         cu_info->ao_cu_mask = ao_cu_mask;
7014         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7015
7016         return 0;
7017 }
7018
7019 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7020 {
7021         .type = AMD_IP_BLOCK_TYPE_GFX,
7022         .major = 9,
7023         .minor = 0,
7024         .rev = 0,
7025         .funcs = &gfx_v9_0_ip_funcs,
7026 };