drm/amdgpu: Remove in_interrupt() usage in gfx_v9_0_kiq_read_clock()
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (from linux-2.6-microblaze.git)
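The change named in the subject lands in gfx_v9_0_kiq_read_clock(), which sits beyond the portion of the file shown here. As a minimal sketch of the pattern — assuming the function follows this driver's usual KIQ fence-polling flow; the hunk below is illustrative rather than the literal committed change — the cleanup drops the in_interrupt() check from the bailout after polling, since the function is only reached from preemptible context and such context checks are being phased out of drivers:

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* before */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	/* after */
	if (r < 1 && amdgpu_in_reset(adev))
		goto failed_kiq_read;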
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                                                         0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_1_ARCT                                                         0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_2_ARCT                                                         0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_3_ARCT                                                         0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_4_ARCT                                                         0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_5_ARCT                                                         0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                        0

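/* RAS sub-block list for the GFX IP.  The TA_RAS_BLOCK__ prefix suggests
 * these values are shared with the RAS TA firmware interface, so the
 * ordering below should not be changed; the *_INDEX_START/*_INDEX_END
 * entries bracket multi-entry sub-ranges for bounds checking.
 */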
enum ta_ras_gfx_subblock {
        /* CPC */
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF */
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG */
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS */
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI */
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ */
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges) */
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0 */
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1 */
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2 */
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA */
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA */
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges) */
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0 */
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1 */
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2 */
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3 */
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4 */
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI */
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP */
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD */
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges) */
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0 */
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1 */
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2 */
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank */
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker */
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

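/* Build one ras_gfx_subblocks[] entry: the stringified name, the matching
 * TA_RAS_BLOCK__* index, and two packed flag words.  Flags a..d become bits
 * 0..3 of hw_supported_error_type; flags e..h land in
 * sw_supported_error_type as bits 1, 3, 0 and 2 respectively (note the
 * reordered shifts below).
 */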
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

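/* Golden register settings, applied via soc15_program_register_sequence():
 * for each entry the register is effectively updated as
 * reg = (reg & ~and_mask) | or_value, i.e. the second argument selects the
 * bits to replace and the third supplies their new value.
 */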
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

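/* GC registers accessed through the RLCG path; see gfx_v9_0_rlcg_wreg()
 * below for the write flow used under SR-IOV.
 */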
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
        {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
        {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

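/* Offsets of the eight RLC_SRM_INDEX_CNTL_ADDR/DATA register pairs relative
 * to the first instance, so the pairs can be programmed in a loop.
 */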
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

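/* Write a GC register on behalf of the guest through the RLCG interface.
 * GRBM_GFX_CNTL/GRBM_GFX_INDEX writes are mirrored into scratch_reg2/3 and
 * then issued directly over MMIO; any other offset is handed to the RLC by
 * placing the value in scratch_reg0 and the offset (with bit 31 set as a
 * "pending" flag) in scratch_reg1, ringing the RLC spare interrupt, and
 * polling until the RLC clears bit 31.
 */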
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
        static void *scratch_reg0;
        static void *scratch_reg1;
        static void *scratch_reg2;
        static void *scratch_reg3;
        static void *spare_int;
        static uint32_t grbm_cntl;
        static uint32_t grbm_idx;

        scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
        scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
        scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
        scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
        spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

        if (amdgpu_sriov_runtime(adev)) {
                pr_err("shouldn't call rlcg write register during sriov runtime\n");
                return;
        }

        if (offset == grbm_cntl || offset == grbm_idx) {
                if (offset == grbm_cntl)
                        writel(v, scratch_reg2);
                else if (offset == grbm_idx)
                        writel(v, scratch_reg3);

                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                uint32_t i = 0;
                uint32_t retries = 50000;

                writel(v, scratch_reg0);
                writel(offset | 0x80000000, scratch_reg1);
                writel(1, spare_int);
                for (i = 0; i < retries; i++) {
                        u32 tmp;

                        tmp = readl(scratch_reg1);
                        if (!(tmp & 0x80000000))
                                break;

                        udelay(10);
                }
                if (i >= retries)
                        pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
        }
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

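/* KIQ PM4 packet builders.  Each helper emits exactly one PACKET3 command on
 * the KIQ ring; the number of dwords written must match the corresponding
 * *_size field in gfx_v9_0_kiq_pm4_funcs further below.
 */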
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring,
                PACKET3_SET_RESOURCES_VMID_MASK(0) |
                /* vmid_mask: 0, queue_type: 0 (KIQ) */
                PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
        amdgpu_ring_write(kiq_ring,
                        lower_32_bits(queue_mask));     /* queue mask lo */
        amdgpu_ring_write(kiq_ring,
                        upper_32_bits(queue_mask));     /* queue mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base: 0, gds heap size: 0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
        amdgpu_ring_write(kiq_ring,
                         PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                         PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                         PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                         PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                         PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                         /* queue_type: normal compute queue */
                         PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                         /* alloc format: all_on_one_pipe */
                         PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                         PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                         /* num_queues: must be 1 */
                         PACKET3_MAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_queues */
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, seq);
        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        /* doorbell offset and engine select */
        amdgpu_ring_write(kiq_ring,
                        PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                        PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                uint16_t pasid, uint32_t flush_type,
                                bool all_hub)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

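/* The *_size fields are the total packet lengths, in dwords, emitted by the
 * helpers above (PACKET3 header included), e.g. gfx_v9_0_kiq_map_queues()
 * performs 7 ring writes, so map_queues_size is 7.
 */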
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
        .kiq_set_resources = gfx_v9_0_kiq_set_resources,
        .kiq_map_queues = gfx_v9_0_kiq_map_queues,
        .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
        .kiq_query_status = gfx_v9_0_kiq_query_status,
        .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
        .query_status_size = 7,
        .invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
        adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_ARCTURUS:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_4_1_arct,
                                                ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        case CHIP_RENOIR:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rn,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rn));
                return; /* Renoir doesn't need the common golden settings */
        case CHIP_ALDEBARAN:
                gfx_v9_4_2_init_golden_registers(adev,
                                                 adev->smuio.funcs->get_die_id(adev));
                break;
        default:
                break;
        }

        if ((adev->asic_type != CHIP_ARCTURUS) &&
            (adev->asic_type != CHIP_ALDEBARAN))
                soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

1001 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
1002 {
1003         adev->gfx.scratch.num_reg = 8;
1004         adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1005         adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
1006 }
1007
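/*
 * Emit a PM4 WRITE_DATA packet that writes @val to register @reg: the
 * control dword selects the engine and a register destination
 * (DST_SEL(0)), optionally with a write confirm; the next two dwords
 * are the destination address (the high dword is 0 for register
 * writes), and the final dword is the value.
 */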
1008 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1009                                        bool wc, uint32_t reg, uint32_t val)
1010 {
1011         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1012         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1013                                 WRITE_DATA_DST_SEL(0) |
1014                                 (wc ? WR_CONFIRM : 0));
1015         amdgpu_ring_write(ring, reg);
1016         amdgpu_ring_write(ring, 0);
1017         amdgpu_ring_write(ring, val);
1018 }
1019
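/*
 * Emit a PM4 WAIT_REG_MEM packet: poll a register (mem_space = 0) or a
 * dword-aligned memory location (mem_space = 1) until
 * (value & mask) == ref, rechecking every @inv poll intervals.
 * WAIT_REG_MEM_FUNCTION(3) hardcodes the "equal" compare function.
 */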
1020 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1021                                   int mem_space, int opt, uint32_t addr0,
1022                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1023                                   uint32_t inv)
1024 {
1025         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1026         amdgpu_ring_write(ring,
1027                                  /* memory (1) or register (0) */
1028                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1029                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1030                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1031                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1032
1033         if (mem_space)
1034                 BUG_ON(addr0 & 0x3); /* Dword align */
1035         amdgpu_ring_write(ring, addr0);
1036         amdgpu_ring_write(ring, addr1);
1037         amdgpu_ring_write(ring, ref);
1038         amdgpu_ring_write(ring, mask);
1039         amdgpu_ring_write(ring, inv); /* poll interval */
1040 }
1041
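/*
 * Basic ring sanity test: seed a scratch register with 0xCAFEDEAD,
 * submit a SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, then
 * poll until the value lands or adev->usec_timeout expires.
 */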
1042 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1043 {
1044         struct amdgpu_device *adev = ring->adev;
1045         uint32_t scratch;
1046         uint32_t tmp = 0;
1047         unsigned i;
1048         int r;
1049
1050         r = amdgpu_gfx_scratch_get(adev, &scratch);
1051         if (r)
1052                 return r;
1053
1054         WREG32(scratch, 0xCAFEDEAD);
1055         r = amdgpu_ring_alloc(ring, 3);
1056         if (r)
1057                 goto error_free_scratch;
1058
1059         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1060         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1061         amdgpu_ring_write(ring, 0xDEADBEEF);
1062         amdgpu_ring_commit(ring);
1063
1064         for (i = 0; i < adev->usec_timeout; i++) {
1065                 tmp = RREG32(scratch);
1066                 if (tmp == 0xDEADBEEF)
1067                         break;
1068                 udelay(1);
1069         }
1070
1071         if (i >= adev->usec_timeout)
1072                 r = -ETIMEDOUT;
1073
1074 error_free_scratch:
1075         amdgpu_gfx_scratch_free(adev, scratch);
1076         return r;
1077 }
1078
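/*
 * Indirect-buffer test: reserve a writeback slot, seed it with
 * 0xCAFEDEAD, and submit a small IB whose WRITE_DATA packet
 * (DST_SEL(5) = memory) stores 0xDEADBEEF there. The fence wait
 * bounds the whole round trip.
 */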
1079 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1080 {
1081         struct amdgpu_device *adev = ring->adev;
1082         struct amdgpu_ib ib;
1083         struct dma_fence *f = NULL;
1084
1085         unsigned index;
1086         uint64_t gpu_addr;
1087         uint32_t tmp;
1088         long r;
1089
1090         r = amdgpu_device_wb_get(adev, &index);
1091         if (r)
1092                 return r;
1093
1094         gpu_addr = adev->wb.gpu_addr + (index * 4);
1095         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1096         memset(&ib, 0, sizeof(ib));
1097         r = amdgpu_ib_get(adev, NULL, 16,
1098                           AMDGPU_IB_POOL_DIRECT, &ib);
1099         if (r)
1100                 goto err1;
1101
1102         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1103         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1104         ib.ptr[2] = lower_32_bits(gpu_addr);
1105         ib.ptr[3] = upper_32_bits(gpu_addr);
1106         ib.ptr[4] = 0xDEADBEEF;
1107         ib.length_dw = 5;
1108
1109         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1110         if (r)
1111                 goto err2;
1112
1113         r = dma_fence_wait_timeout(f, false, timeout);
1114         if (r == 0) {
1115                 r = -ETIMEDOUT;
1116                 goto err2;
1117         } else if (r < 0) {
1118                 goto err2;
1119         }
1120
1121         tmp = adev->wb.wb[index];
1122         if (tmp == 0xDEADBEEF)
1123                 r = 0;
1124         else
1125                 r = -EINVAL;
1126
1127 err2:
1128         amdgpu_ib_free(adev, &ib, NULL);
1129         dma_fence_put(f);
1130 err1:
1131         amdgpu_device_wb_free(adev, index);
1132         return r;
1133 }
1134
1136 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1137 {
1138         release_firmware(adev->gfx.pfp_fw);
1139         adev->gfx.pfp_fw = NULL;
1140         release_firmware(adev->gfx.me_fw);
1141         adev->gfx.me_fw = NULL;
1142         release_firmware(adev->gfx.ce_fw);
1143         adev->gfx.ce_fw = NULL;
1144         release_firmware(adev->gfx.rlc_fw);
1145         adev->gfx.rlc_fw = NULL;
1146         release_firmware(adev->gfx.mec_fw);
1147         adev->gfx.mec_fw = NULL;
1148         release_firmware(adev->gfx.mec2_fw);
1149         adev->gfx.mec2_fw = NULL;
1150
1151         kfree(adev->gfx.rlc.register_list_format);
1152 }
1153
1154 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1155 {
1156         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1157
1158         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1159         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1160         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1161         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1162         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1163         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1164         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1165         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1166         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1167         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1168         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1169         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1170         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1171         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1172                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1173 }
1174
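/*
 * Record whether the loaded ME/MEC firmware is new enough to handle
 * combined register write-then-wait operations, and warn once when the
 * CP firmware predates the fix.
 */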
1175 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1176 {
1177         adev->gfx.me_fw_write_wait = false;
1178         adev->gfx.mec_fw_write_wait = false;
1179
1180         if ((adev->asic_type != CHIP_ARCTURUS) &&
1181             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1182             (adev->gfx.mec_feature_version < 46) ||
1183             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1184             (adev->gfx.pfp_feature_version < 46)))
1185                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1186
1187         switch (adev->asic_type) {
1188         case CHIP_VEGA10:
1189                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1190                     (adev->gfx.me_feature_version >= 42) &&
1191                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1192                     (adev->gfx.pfp_feature_version >= 42))
1193                         adev->gfx.me_fw_write_wait = true;
1194
1195                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1196                     (adev->gfx.mec_feature_version >= 42))
1197                         adev->gfx.mec_fw_write_wait = true;
1198                 break;
1199         case CHIP_VEGA12:
1200                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1201                     (adev->gfx.me_feature_version >= 44) &&
1202                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1203                     (adev->gfx.pfp_feature_version >= 44))
1204                         adev->gfx.me_fw_write_wait = true;
1205
1206                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1207                     (adev->gfx.mec_feature_version >= 44))
1208                         adev->gfx.mec_fw_write_wait = true;
1209                 break;
1210         case CHIP_VEGA20:
1211                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1212                     (adev->gfx.me_feature_version >= 44) &&
1213                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1214                     (adev->gfx.pfp_feature_version >= 44))
1215                         adev->gfx.me_fw_write_wait = true;
1216
1217                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1218                     (adev->gfx.mec_feature_version >= 44))
1219                         adev->gfx.mec_fw_write_wait = true;
1220                 break;
1221         case CHIP_RAVEN:
1222                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1223                     (adev->gfx.me_feature_version >= 42) &&
1224                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1225                     (adev->gfx.pfp_feature_version >= 42))
1226                         adev->gfx.me_fw_write_wait = true;
1227
1228                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1229                     (adev->gfx.mec_feature_version >= 42))
1230                         adev->gfx.mec_fw_write_wait = true;
1231                 break;
1232         default:
1233                 adev->gfx.me_fw_write_wait = true;
1234                 adev->gfx.mec_fw_write_wait = true;
1235                 break;
1236         }
1237 }
1238
1239 struct amdgpu_gfxoff_quirk {
1240         u16 chip_vendor;
1241         u16 chip_device;
1242         u16 subsys_vendor;
1243         u16 subsys_device;
1244         u8 revision;
1245 };
1246
1247 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1248         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1249         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1250         /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1251         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1252         /* GFXOFF is unstable on C6 parts with VBIOS 113-RAVEN-114 */
1253         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1254         { 0, 0, 0, 0, 0 },
1255 };
1256
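/* Disable GFXOFF on boards that match an entry in the quirk list above. */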
1257 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1258 {
1259         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1260
1261         while (p && p->chip_device != 0) {
1262                 if (pdev->vendor == p->chip_vendor &&
1263                     pdev->device == p->chip_device &&
1264                     pdev->subsystem_vendor == p->subsys_vendor &&
1265                     pdev->subsystem_device == p->subsys_device &&
1266                     pdev->revision == p->revision) {
1267                         return true;
1268                 }
1269                 ++p;
1270         }
1271         return false;
1272 }
1273
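/*
 * Raven "kicker" parts ship SMU firmware 0x41e2b or newer; the
 * matching raven_kicker_rlc.bin is selected in
 * gfx_v9_0_init_rlc_microcode().
 */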
1274 static bool is_raven_kicker(struct amdgpu_device *adev)
1275 {
1276         return adev->pm.fw_version >= 0x41e2b;
1280 }
1281
1282 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1283 {
1284         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1285                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1286
1287         switch (adev->asic_type) {
1288         case CHIP_VEGA10:
1289         case CHIP_VEGA12:
1290         case CHIP_VEGA20:
1291                 break;
1292         case CHIP_RAVEN:
1293                 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1294                       (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1295                     ((!is_raven_kicker(adev) &&
1296                       adev->gfx.rlc_fw_version < 531) ||
1297                      (adev->gfx.rlc_feature_version < 1) ||
1298                      !adev->gfx.rlc.is_rlc_v2_1))
1299                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1300
1301                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1302                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1303                                 AMD_PG_SUPPORT_CP |
1304                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1305                 break;
1306         case CHIP_RENOIR:
1307                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1308                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1309                                 AMD_PG_SUPPORT_CP |
1310                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1311                 break;
1312         default:
1313                 break;
1314         }
1315 }
1316
1317 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1318                                           const char *chip_name)
1319 {
1320         char fw_name[30];
1321         int err;
1322         struct amdgpu_firmware_info *info = NULL;
1323         const struct common_firmware_header *header = NULL;
1324         const struct gfx_firmware_header_v1_0 *cp_hdr;
1325
1326         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1327         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1328         if (err)
1329                 goto out;
1330         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1331         if (err)
1332                 goto out;
1333         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1334         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1335         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1336
1337         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1338         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1339         if (err)
1340                 goto out;
1341         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1342         if (err)
1343                 goto out;
1344         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1345         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1346         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1347
1348         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1349         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1350         if (err)
1351                 goto out;
1352         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1353         if (err)
1354                 goto out;
1355         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1356         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1357         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1358
1359         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1360                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1361                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1362                 info->fw = adev->gfx.pfp_fw;
1363                 header = (const struct common_firmware_header *)info->fw->data;
1364                 adev->firmware.fw_size +=
1365                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1366
1367                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1368                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1369                 info->fw = adev->gfx.me_fw;
1370                 header = (const struct common_firmware_header *)info->fw->data;
1371                 adev->firmware.fw_size +=
1372                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1373
1374                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1375                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1376                 info->fw = adev->gfx.ce_fw;
1377                 header = (const struct common_firmware_header *)info->fw->data;
1378                 adev->firmware.fw_size +=
1379                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1380         }
1381
1382 out:
1383         if (err) {
1384                 dev_err(adev->dev,
1385                         "gfx9: Failed to load firmware \"%s\"\n",
1386                         fw_name);
1387                 release_firmware(adev->gfx.pfp_fw);
1388                 adev->gfx.pfp_fw = NULL;
1389                 release_firmware(adev->gfx.me_fw);
1390                 adev->gfx.me_fw = NULL;
1391                 release_firmware(adev->gfx.ce_fw);
1392                 adev->gfx.ce_fw = NULL;
1393         }
1394         return err;
1395 }
1396
1397 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1398                                           const char *chip_name)
1399 {
1400         char fw_name[30];
1401         int err;
1402         struct amdgpu_firmware_info *info = NULL;
1403         const struct common_firmware_header *header = NULL;
1404         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1405         unsigned int *tmp = NULL;
1406         unsigned int i = 0;
1407         uint16_t version_major;
1408         uint16_t version_minor;
1409         uint32_t smu_version;
1410
1411         /*
1412          * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
1413          * instead of picasso_rlc.bin.
1414          * The two are told apart by the PCI revision:
1415          * PCO AM4: revision >= 0xC8 && revision <= 0xCF,
1416          *          or revision >= 0xD8 && revision <= 0xDF;
1417          * otherwise it is PCO FP5.
1418          */
1419         if (!strcmp(chip_name, "picasso") &&
1420                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1421                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1422                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1423         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1424                 (smu_version >= 0x41e2b))
1425                 /*
1426                  * SMC is loaded by SBIOS on APUs, so the driver can query the SMU version directly.
1427                  */
1428                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1429         else
1430                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1431         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1432         if (err)
1433                 goto out;
1434         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
             if (err)
                     goto out;
1435         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1436
1437         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1438         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1439         if (version_major == 2 && version_minor == 1)
1440                 adev->gfx.rlc.is_rlc_v2_1 = true;
1441
1442         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1443         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1444         adev->gfx.rlc.save_and_restore_offset =
1445                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1446         adev->gfx.rlc.clear_state_descriptor_offset =
1447                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1448         adev->gfx.rlc.avail_scratch_ram_locations =
1449                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1450         adev->gfx.rlc.reg_restore_list_size =
1451                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1452         adev->gfx.rlc.reg_list_format_start =
1453                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1454         adev->gfx.rlc.reg_list_format_separate_start =
1455                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1456         adev->gfx.rlc.starting_offsets_start =
1457                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1458         adev->gfx.rlc.reg_list_format_size_bytes =
1459                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1460         adev->gfx.rlc.reg_list_size_bytes =
1461                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1462         adev->gfx.rlc.register_list_format =
1463                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1464                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1465         if (!adev->gfx.rlc.register_list_format) {
1466                 err = -ENOMEM;
1467                 goto out;
1468         }
1469
1470         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1471                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1472         for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1473                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1474
1475         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1476
1477         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1478                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1479         for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1480                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1481
1482         if (adev->gfx.rlc.is_rlc_v2_1)
1483                 gfx_v9_0_init_rlc_ext_microcode(adev);
1484
1485         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1486                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1487                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1488                 info->fw = adev->gfx.rlc_fw;
1489                 header = (const struct common_firmware_header *)info->fw->data;
1490                 adev->firmware.fw_size +=
1491                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1492
1493                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1494                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1495                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1496                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1497                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1498                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1499                         info->fw = adev->gfx.rlc_fw;
1500                         adev->firmware.fw_size +=
1501                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1502
1503                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1504                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1505                         info->fw = adev->gfx.rlc_fw;
1506                         adev->firmware.fw_size +=
1507                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1508
1509                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1510                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1511                         info->fw = adev->gfx.rlc_fw;
1512                         adev->firmware.fw_size +=
1513                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1514                 }
1515         }
1516
1517 out:
1518         if (err) {
1519                 dev_err(adev->dev,
1520                         "gfx9: Failed to load firmware \"%s\"\n",
1521                         fw_name);
1522                 release_firmware(adev->gfx.rlc_fw);
1523                 adev->gfx.rlc_fw = NULL;
1524         }
1525         return err;
1526 }
1527
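/*
 * Aldebaran, Arcturus and Renoir use a single MEC firmware image for
 * both MEC1 and MEC2; every other GFX9 part loads a separate
 * <chip>_mec2.bin.
 */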
1528 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1529 {
1530         if (adev->asic_type == CHIP_ALDEBARAN ||
1531             adev->asic_type == CHIP_ARCTURUS ||
1532             adev->asic_type == CHIP_RENOIR)
1533                 return false;
1534
1535         return true;
1536 }
1537
1538 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1539                                           const char *chip_name)
1540 {
1541         char fw_name[30];
1542         int err;
1543         struct amdgpu_firmware_info *info = NULL;
1544         const struct common_firmware_header *header = NULL;
1545         const struct gfx_firmware_header_v1_0 *cp_hdr;
1546
1547         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1548         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1549         if (err)
1550                 goto out;
1551         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1552         if (err)
1553                 goto out;
1554         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1555         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1556         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1557
1559         if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1560                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1561                 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1562                 if (!err) {
1563                         err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1564                         if (err)
1565                                 goto out;
1566                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1567                                 adev->gfx.mec2_fw->data;
1568                         adev->gfx.mec2_fw_version =
1569                                 le32_to_cpu(cp_hdr->header.ucode_version);
1570                         adev->gfx.mec2_feature_version =
1571                                 le32_to_cpu(cp_hdr->ucode_feature_version);
1572                 } else {
1573                         err = 0;
1574                         adev->gfx.mec2_fw = NULL;
1575                 }
1576         }
1577
1578         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1579                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1580                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1581                 info->fw = adev->gfx.mec_fw;
1582                 header = (const struct common_firmware_header *)info->fw->data;
1583                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1584                 adev->firmware.fw_size +=
1585                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1586
1587                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1588                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1589                 info->fw = adev->gfx.mec_fw;
1590                 adev->firmware.fw_size +=
1591                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1592
1593                 if (adev->gfx.mec2_fw) {
1594                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1595                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1596                         info->fw = adev->gfx.mec2_fw;
1597                         header = (const struct common_firmware_header *)info->fw->data;
1598                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1599                         adev->firmware.fw_size +=
1600                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1601
1602                         /* TODO: Determine if MEC2 JT FW loading can be
1603                          * removed for all GFX v9 ASICs and above */
1604                         if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1605                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1606                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1607                                 info->fw = adev->gfx.mec2_fw;
1608                                 adev->firmware.fw_size +=
1609                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1610                                         PAGE_SIZE);
1611                         }
1612                 }
1613         }
1614
1615 out:
1616         gfx_v9_0_check_if_need_gfxoff(adev);
1617         gfx_v9_0_check_fw_write_wait(adev);
1618         if (err) {
1619                 dev_err(adev->dev,
1620                         "gfx9: Failed to load firmware \"%s\"\n",
1621                         fw_name);
1622                 release_firmware(adev->gfx.mec_fw);
1623                 adev->gfx.mec_fw = NULL;
1624                 release_firmware(adev->gfx.mec2_fw);
1625                 adev->gfx.mec2_fw = NULL;
1626         }
1627         return err;
1628 }
1629
1630 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1631 {
1632         const char *chip_name;
1633         int r;
1634
1635         DRM_DEBUG("\n");
1636
1637         switch (adev->asic_type) {
1638         case CHIP_VEGA10:
1639                 chip_name = "vega10";
1640                 break;
1641         case CHIP_VEGA12:
1642                 chip_name = "vega12";
1643                 break;
1644         case CHIP_VEGA20:
1645                 chip_name = "vega20";
1646                 break;
1647         case CHIP_RAVEN:
1648                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1649                         chip_name = "raven2";
1650                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1651                         chip_name = "picasso";
1652                 else
1653                         chip_name = "raven";
1654                 break;
1655         case CHIP_ARCTURUS:
1656                 chip_name = "arcturus";
1657                 break;
1658         case CHIP_RENOIR:
1659                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1660                         chip_name = "renoir";
1661                 else
1662                         chip_name = "green_sardine";
1663                 break;
1664         case CHIP_ALDEBARAN:
1665                 chip_name = "aldebaran";
1666                 break;
1667         default:
1668                 BUG();
1669         }
1670
1671         /* No CPG in Arcturus */
1672         if (adev->gfx.num_gfx_rings) {
1673                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1674                 if (r)
1675                         return r;
1676         }
1677
1678         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1679         if (r)
1680                 return r;
1681
1682         r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1683         if (r)
1684                 return r;
1685
1686         return 0;
1687 }
1688
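/*
 * Size of the clear-state indirect buffer in dwords: begin-clear-state
 * preamble (2) + context control (3) + one SET_CONTEXT_REG header pair
 * plus payload per extent + end-clear-state (2) + CLEAR_STATE packet (2).
 */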
1689 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1690 {
1691         u32 count = 0;
1692         const struct cs_section_def *sect = NULL;
1693         const struct cs_extent_def *ext = NULL;
1694
1695         /* begin clear state */
1696         count += 2;
1697         /* context control state */
1698         count += 3;
1699
1700         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1701                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1702                         if (sect->id == SECT_CONTEXT)
1703                                 count += 2 + ext->reg_count;
1704                         else
1705                                 return 0;
1706                 }
1707         }
1708
1709         /* end clear state */
1710         count += 2;
1711         /* clear state */
1712         count += 2;
1713
1714         return count;
1715 }
1716
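/*
 * Build the clear-state buffer whose size gfx_v9_0_get_csb_size()
 * computed: begin/end clear-state preamble, context control, and one
 * SET_CONTEXT_REG run per SECT_CONTEXT extent.
 */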
1717 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1718                                     volatile u32 *buffer)
1719 {
1720         u32 count = 0, i;
1721         const struct cs_section_def *sect = NULL;
1722         const struct cs_extent_def *ext = NULL;
1723
1724         if (adev->gfx.rlc.cs_data == NULL)
1725                 return;
1726         if (buffer == NULL)
1727                 return;
1728
1729         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1730         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1731
1732         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1733         buffer[count++] = cpu_to_le32(0x80000000);
1734         buffer[count++] = cpu_to_le32(0x80000000);
1735
1736         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1737                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1738                         if (sect->id == SECT_CONTEXT) {
1739                                 buffer[count++] =
1740                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1741                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1742                                                 PACKET3_SET_CONTEXT_REG_START);
1743                                 for (i = 0; i < ext->reg_count; i++)
1744                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1745                         } else {
1746                                 return;
1747                         }
1748                 }
1749         }
1750
1751         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1752         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1753
1754         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1755         buffer[count++] = cpu_to_le32(0);
1756 }
1757
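/*
 * Compute the per-SE/SH always-on CU masks: the first two CUs feed
 * RLC_PG_ALWAYS_ON_CU_MASK, while the first always_on_cu_num CUs (4 on
 * APUs, 8 on Vega12, 12 otherwise) feed RLC_LB_ALWAYS_ACTIVE_CU_MASK
 * and are cached in cu_info->ao_cu_bitmap.
 */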
1758 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1759 {
1760         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1761         uint32_t pg_always_on_cu_num = 2;
1762         uint32_t always_on_cu_num;
1763         uint32_t i, j, k;
1764         uint32_t mask, cu_bitmap, counter;
1765
1766         if (adev->flags & AMD_IS_APU)
1767                 always_on_cu_num = 4;
1768         else if (adev->asic_type == CHIP_VEGA12)
1769                 always_on_cu_num = 8;
1770         else
1771                 always_on_cu_num = 12;
1772
1773         mutex_lock(&adev->grbm_idx_mutex);
1774         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1775                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1776                         mask = 1;
1777                         cu_bitmap = 0;
1778                         counter = 0;
1779                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1780
1781                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1782                                 if (cu_info->bitmap[i][j] & mask) {
1783                                         if (counter == pg_always_on_cu_num)
1784                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1785                                         if (counter < always_on_cu_num)
1786                                                 cu_bitmap |= mask;
1787                                         else
1788                                                 break;
1789                                         counter++;
1790                                 }
1791                                 mask <<= 1;
1792                         }
1793
1794                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1795                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1796                 }
1797         }
1798         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1799         mutex_unlock(&adev->grbm_idx_mutex);
1800 }
1801
1802 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1803 {
1804         uint32_t data;
1805
1806         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1807         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1808         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1809         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1810         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1811
1812         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1813         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1814
1815         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1816         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1817
1818         mutex_lock(&adev->grbm_idx_mutex);
1819         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1820         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1821         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1822
1823         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1824         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1825         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1826         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1827         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1828
1829         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1830         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1831         data &= 0x0000FFFF;
1832         data |= 0x00C00000;
1833         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1834
1835         /*
1836          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1837          * programmed in gfx_v9_0_init_always_on_cu_mask()
1838          */
1839
1840         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but
1841          * is set here as part of the RLC_LB_CNTL configuration */
1842         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1843         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1844         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1845         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1846         mutex_unlock(&adev->grbm_idx_mutex);
1847
1848         gfx_v9_0_init_always_on_cu_mask(adev);
1849 }
1850
1851 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1852 {
1853         uint32_t data;
1854
1855         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1856         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1857         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1858         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1859         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1860
1861         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1862         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1863
1864         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1865         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1866
1867         mutex_lock(&adev->grbm_idx_mutex);
1868         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1869         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1870         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1871
1872         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1873         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1874         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1875         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1876         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1877
1878         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1879         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1880         data &= 0x0000FFFF;
1881         data |= 0x00C00000;
1882         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1883
1884         /*
1885          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1886          * programmed in gfx_v9_0_init_always_on_cu_mask()
1887          */
1888
1889         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but
1890          * is set here as part of the RLC_LB_CNTL configuration */
1891         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1892         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1893         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1894         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1895         mutex_unlock(&adev->grbm_idx_mutex);
1896
1897         gfx_v9_0_init_always_on_cu_mask(adev);
1898 }
1899
1900 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1901 {
1902         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1903 }
1904
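/* 5 RLC jump table entries when a separate MEC2 image is loaded, 4 otherwise. */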
1905 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1906 {
1907         if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1908                 return 5;
1909         else
1910                 return 4;
1911 }
1912
1913 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1914 {
1915         const struct cs_section_def *cs_data;
1916         int r;
1917
1918         adev->gfx.rlc.cs_data = gfx9_cs_data;
1919
1920         cs_data = adev->gfx.rlc.cs_data;
1921
1922         if (cs_data) {
1923                 /* init clear state block */
1924                 r = amdgpu_gfx_rlc_init_csb(adev);
1925                 if (r)
1926                         return r;
1927         }
1928
1929         if (adev->flags & AMD_IS_APU) {
1930                 /* TODO: double check the cp_table_size for RV */
1931                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1932                 r = amdgpu_gfx_rlc_init_cpt(adev);
1933                 if (r)
1934                         return r;
1935         }
1936
1937         switch (adev->asic_type) {
1938         case CHIP_RAVEN:
1939                 gfx_v9_0_init_lbpw(adev);
1940                 break;
1941         case CHIP_VEGA20:
1942                 gfx_v9_4_init_lbpw(adev);
1943                 break;
1944         default:
1945                 break;
1946         }
1947
1948         /* init spm vmid with 0xf */
1949         if (adev->gfx.rlc.funcs->update_spm_vmid)
1950                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1951
1952         return 0;
1953 }
1954
1955 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1956 {
1957         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1958         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1959 }
1960
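/*
 * Allocate the EOP buffer (one GFX9_MEC_HPD_SIZE slice per acquired
 * compute ring) in VRAM and copy the MEC microcode into a GTT buffer
 * object.
 */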
1961 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1962 {
1963         int r;
1964         u32 *hpd;
1965         const __le32 *fw_data;
1966         unsigned fw_size;
1967         u32 *fw;
1968         size_t mec_hpd_size;
1969
1970         const struct gfx_firmware_header_v1_0 *mec_hdr;
1971
1972         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1973
1974         /* take ownership of the relevant compute queues */
1975         amdgpu_gfx_compute_queue_acquire(adev);
1976         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1977         if (mec_hpd_size) {
1978                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1979                                               AMDGPU_GEM_DOMAIN_VRAM,
1980                                               &adev->gfx.mec.hpd_eop_obj,
1981                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1982                                               (void **)&hpd);
1983                 if (r) {
1984                         dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1985                         gfx_v9_0_mec_fini(adev);
1986                         return r;
1987                 }
1988
1989                 memset(hpd, 0, mec_hpd_size);
1990
1991                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1992                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1993         }
1994
1995         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1996
1997         fw_data = (const __le32 *)
1998                 (adev->gfx.mec_fw->data +
1999                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2000         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
2001
2002         r = amdgpu_bo_create_reserved(adev, fw_size,
2003                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2004                                       &adev->gfx.mec.mec_fw_obj,
2005                                       &adev->gfx.mec.mec_fw_gpu_addr,
2006                                       (void **)&fw);
2007         if (r) {
2008                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
2009                 gfx_v9_0_mec_fini(adev);
2010                 return r;
2011         }
2012
2013         memcpy(fw, fw_data, fw_size);
2014
2015         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
2016         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
2017
2018         return 0;
2019 }
2020
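/*
 * Read one per-wave register through the SQ_IND_INDEX/SQ_IND_DATA
 * pair: WAVE_ID/SIMD_ID select the wave, INDEX selects the register,
 * and FORCE_READ is set so the read goes through.
 */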
2021 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
2022 {
2023         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2024                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2025                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2026                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
2027                 (SQ_IND_INDEX__FORCE_READ_MASK));
2028         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2029 }
2030
2031 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2032                            uint32_t wave, uint32_t thread,
2033                            uint32_t regno, uint32_t num, uint32_t *out)
2034 {
2035         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2036                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2037                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2038                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2039                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2040                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2041                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2042         while (num--)
2043                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2044 }
2045
2046 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2047 {
2048         /* type 1 wave data */
2049         dst[(*no_fields)++] = 1;
2050         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2051         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2052         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2053         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2054         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2055         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2056         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2057         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2058         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2059         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2060         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2061         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2062         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2063         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2064 }
2065
2066 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2067                                      uint32_t wave, uint32_t start,
2068                                      uint32_t size, uint32_t *dst)
2069 {
2070         wave_read_regs(
2071                 adev, simd, wave, 0,
2072                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2073 }
2074
2075 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2076                                      uint32_t wave, uint32_t thread,
2077                                      uint32_t start, uint32_t size,
2078                                      uint32_t *dst)
2079 {
2080         wave_read_regs(
2081                 adev, simd, wave, thread,
2082                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2083 }
2084
2085 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2086                                   u32 me, u32 pipe, u32 q, u32 vm)
2087 {
2088         soc15_grbm_select(adev, me, pipe, q, vm);
2089 }
2090
2091 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2092         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2093         .select_se_sh = &gfx_v9_0_select_se_sh,
2094         .read_wave_data = &gfx_v9_0_read_wave_data,
2095         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2096         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2097         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2098         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2099         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2100         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2101 };
2102
2103 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2104         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2105         .select_se_sh = &gfx_v9_0_select_se_sh,
2106         .read_wave_data = &gfx_v9_0_read_wave_data,
2107         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2108         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2109         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2110         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2111         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2112         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2113         .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
2114 };
2115
2116 static const struct amdgpu_gfx_funcs gfx_v9_4_2_gfx_funcs = {
2117         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2118         .select_se_sh = &gfx_v9_0_select_se_sh,
2119         .read_wave_data = &gfx_v9_0_read_wave_data,
2120         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2121         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2122         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2123         .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
2124         .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
2125         .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
2126         .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
2127         .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
2128         .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
2129 };
2130
2131 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2132 {
2133         u32 gb_addr_config;
2134         int err;
2135
2136         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2137
2138         switch (adev->asic_type) {
2139         case CHIP_VEGA10:
2140                 adev->gfx.config.max_hw_contexts = 8;
2141                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2142                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2143                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2144                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2145                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2146                 break;
2147         case CHIP_VEGA12:
2148                 adev->gfx.config.max_hw_contexts = 8;
2149                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2150                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2151                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2152                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2153                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2154                 DRM_INFO("fix gfx.config for vega12\n");
2155                 break;
2156         case CHIP_VEGA20:
2157                 adev->gfx.config.max_hw_contexts = 8;
2158                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2159                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2160                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2161                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2162                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2163                 gb_addr_config &= ~0xf3e777ff;
2164                 gb_addr_config |= 0x22014042;
2165                 /* check vbios table if gpu info is not available */
2166                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2167                 if (err)
2168                         return err;
2169                 break;
2170         case CHIP_RAVEN:
2171                 adev->gfx.config.max_hw_contexts = 8;
2172                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2173                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2174                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2175                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2176                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2177                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2178                 else
2179                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2180                 break;
2181         case CHIP_ARCTURUS:
2182                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2183                 adev->gfx.config.max_hw_contexts = 8;
2184                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2185                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2186                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2187                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2188                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2189                 gb_addr_config &= ~0xf3e777ff;
2190                 gb_addr_config |= 0x22014042;
2191                 break;
2192         case CHIP_RENOIR:
2193                 adev->gfx.config.max_hw_contexts = 8;
2194                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2195                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2196                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2197                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2198                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2199                 gb_addr_config &= ~0xf3e777ff;
2200                 gb_addr_config |= 0x22010042;
2201                 break;
2202         case CHIP_ALDEBARAN:
2203                 adev->gfx.funcs = &gfx_v9_4_2_gfx_funcs;
2204                 adev->gfx.config.max_hw_contexts = 8;
2205                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2206                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2207                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2208                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2209                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2210                 gb_addr_config &= ~0xf3e777ff;
2211                 gb_addr_config |= 0x22014042;
2212                 /* check vbios table if gpu info is not available */
2213                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2214                 if (err)
2215                         return err;
2216                 break;
2217         default:
2218                 BUG();
2219                 break;
2220         }
2221
2222         adev->gfx.config.gb_addr_config = gb_addr_config;
2223
2224         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2225                         REG_GET_FIELD(
2226                                         adev->gfx.config.gb_addr_config,
2227                                         GB_ADDR_CONFIG,
2228                                         NUM_PIPES);
2229
2230         adev->gfx.config.max_tile_pipes =
2231                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2232
2233         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2234                         REG_GET_FIELD(
2235                                         adev->gfx.config.gb_addr_config,
2236                                         GB_ADDR_CONFIG,
2237                                         NUM_BANKS);
2238         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2239                         REG_GET_FIELD(
2240                                         adev->gfx.config.gb_addr_config,
2241                                         GB_ADDR_CONFIG,
2242                                         MAX_COMPRESSED_FRAGS);
2243         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2244                         REG_GET_FIELD(
2245                                         adev->gfx.config.gb_addr_config,
2246                                         GB_ADDR_CONFIG,
2247                                         NUM_RB_PER_SE);
2248         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2249                         REG_GET_FIELD(
2250                                         adev->gfx.config.gb_addr_config,
2251                                         GB_ADDR_CONFIG,
2252                                         NUM_SHADER_ENGINES);
2253         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2254                         REG_GET_FIELD(
2255                                         adev->gfx.config.gb_addr_config,
2256                                         GB_ADDR_CONFIG,
2257                                         PIPE_INTERLEAVE_SIZE));
2258
2259         return 0;
2260 }
2261
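/*
 * Set up one compute ring.  A note on the addressing used below, as
 * inferred from this file: the CP numbers the compute micro-engines
 * me1/me2 (me0 is the gfx ME), so MEC index 0 becomes ring->me = 1, and
 * the doorbell index is doubled because SOC15 doorbells are 64 bits wide
 * while the doorbell index space counts 32-bit slots.
 */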
2262 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2263                                       int mec, int pipe, int queue)
2264 {
2265         unsigned irq_type;
2266         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2267         unsigned int hw_prio;
2268
2271         /* mec0 is me1 */
2272         ring->me = mec + 1;
2273         ring->pipe = pipe;
2274         ring->queue = queue;
2275
2276         ring->ring_obj = NULL;
2277         ring->use_doorbell = true;
2278         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2279         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2280                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2281         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2282
2283         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2284                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2285                 + ring->pipe;
2286         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2287                         AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2288         /* type-2 packets are deprecated on MEC, use type-3 instead */
2289         return amdgpu_ring_init(adev, ring, 1024,
2290                                 &adev->gfx.eop_irq, irq_type, hw_prio);
2291 }
2292
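/*
 * Software-side init: roughly, IRQ sources first, then microcode, RLC and
 * MEC buffer objects, the gfx/compute/KIQ rings, and finally the MQD
 * backing store and early GPU config.
 */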
2293 static int gfx_v9_0_sw_init(void *handle)
2294 {
2295         int i, j, k, r, ring_id;
2296         struct amdgpu_ring *ring;
2297         struct amdgpu_kiq *kiq;
2298         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2299
2300         switch (adev->asic_type) {
2301         case CHIP_VEGA10:
2302         case CHIP_VEGA12:
2303         case CHIP_VEGA20:
2304         case CHIP_RAVEN:
2305         case CHIP_ARCTURUS:
2306         case CHIP_RENOIR:
2307         case CHIP_ALDEBARAN:
2308                 adev->gfx.mec.num_mec = 2;
2309                 break;
2310         default:
2311                 adev->gfx.mec.num_mec = 1;
2312                 break;
2313         }
2314
2315         adev->gfx.mec.num_pipe_per_mec = 4;
2316         adev->gfx.mec.num_queue_per_pipe = 8;
2317
2318         /* EOP Event */
2319         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2320         if (r)
2321                 return r;
2322
2323         /* Privileged reg */
2324         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2325                               &adev->gfx.priv_reg_irq);
2326         if (r)
2327                 return r;
2328
2329         /* Privileged inst */
2330         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2331                               &adev->gfx.priv_inst_irq);
2332         if (r)
2333                 return r;
2334
2335         /* ECC error */
2336         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2337                               &adev->gfx.cp_ecc_error_irq);
2338         if (r)
2339                 return r;
2340
2341         /* FUE error */
2342         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2343                               &adev->gfx.cp_ecc_error_irq);
2344         if (r)
2345                 return r;
2346
2347         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2348
2349         gfx_v9_0_scratch_init(adev);
2350
2351         r = gfx_v9_0_init_microcode(adev);
2352         if (r) {
2353                 DRM_ERROR("Failed to load gfx firmware!\n");
2354                 return r;
2355         }
2356
2357         r = adev->gfx.rlc.funcs->init(adev);
2358         if (r) {
2359                 DRM_ERROR("Failed to init rlc BOs!\n");
2360                 return r;
2361         }
2362
2363         r = gfx_v9_0_mec_init(adev);
2364         if (r) {
2365                 DRM_ERROR("Failed to init MEC BOs!\n");
2366                 return r;
2367         }
2368
2369         /* set up the gfx ring */
2370         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2371                 ring = &adev->gfx.gfx_ring[i];
2372                 ring->ring_obj = NULL;
2373                 if (!i)
2374                         sprintf(ring->name, "gfx");
2375                 else
2376                         sprintf(ring->name, "gfx_%d", i);
2377                 ring->use_doorbell = true;
2378                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2379                 r = amdgpu_ring_init(adev, ring, 1024,
2380                                      &adev->gfx.eop_irq,
2381                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2382                                      AMDGPU_RING_PRIO_DEFAULT);
2383                 if (r)
2384                         return r;
2385         }
2386
2387         /* set up the compute queues - allocate horizontally across pipes */
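	/*
	 * With this queue-outer/pipe-inner nesting, e.g. 4 pipes per MEC
	 * yield (pipe0,q0), (pipe1,q0), (pipe2,q0), (pipe3,q0) before any
	 * queue 1 is used.
	 */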
2388         ring_id = 0;
2389         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2390                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2391                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2392                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2393                                         continue;
2394
2395                                 r = gfx_v9_0_compute_ring_init(adev,
2396                                                                ring_id,
2397                                                                i, k, j);
2398                                 if (r)
2399                                         return r;
2400
2401                                 ring_id++;
2402                         }
2403                 }
2404         }
2405
2406         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2407         if (r) {
2408                 DRM_ERROR("Failed to init KIQ BOs!\n");
2409                 return r;
2410         }
2411
2412         kiq = &adev->gfx.kiq;
2413         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2414         if (r)
2415                 return r;
2416
2417         /* create MQD for all compute queues as well as KIQ for the SRIOV case */
2418         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2419         if (r)
2420                 return r;
2421
2422         adev->gfx.ce_ram_size = 0x8000;
2423
2424         r = gfx_v9_0_gpu_early_init(adev);
2425         if (r)
2426                 return r;
2427
2428         return 0;
2429 }
2430
2432 static int gfx_v9_0_sw_fini(void *handle)
2433 {
2434         int i;
2435         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2436
2437         amdgpu_gfx_ras_fini(adev);
2438
2439         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2440                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2441         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2442                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2443
2444         amdgpu_gfx_mqd_sw_fini(adev);
2445         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2446         amdgpu_gfx_kiq_fini(adev);
2447
2448         gfx_v9_0_mec_fini(adev);
2449         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2450         if (adev->flags & AMD_IS_APU) {
2451                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2452                                 &adev->gfx.rlc.cp_table_gpu_addr,
2453                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2454         }
2455         gfx_v9_0_free_microcode(adev);
2456
2457         return 0;
2458 }
2459
2461 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2462 {
2463         /* TODO */
2464 }
2465
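/*
 * Select which SE/SH/instance subsequent GRBM-indexed register accesses
 * target.  Passing 0xffffffff for a field requests broadcast to all units
 * at that level rather than indexing a single one.
 */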
2466 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2467                            u32 instance)
2468 {
2469         u32 data;
2470
2471         if (instance == 0xffffffff)
2472                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2473         else
2474                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2475
2476         if (se_num == 0xffffffff)
2477                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2478         else
2479                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2480
2481         if (sh_num == 0xffffffff)
2482                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2483         else
2484                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2485
2486         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2487 }
2488
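/*
 * Return the bitmap of active render backends for the currently selected
 * SE/SH: backends disabled by fuses (CC_*) or by user config (GC_USER_*)
 * are OR'd together, then the result is inverted and masked down to the
 * per-SH backend count.
 */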
2489 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2490 {
2491         u32 data, mask;
2492
2493         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2494         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2495
2496         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2497         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2498
2499         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2500                                          adev->gfx.config.max_sh_per_se);
2501
2502         return (~data) & mask;
2503 }
2504
2505 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2506 {
2507         int i, j;
2508         u32 data;
2509         u32 active_rbs = 0;
2510         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2511                                         adev->gfx.config.max_sh_per_se;
2512
2513         mutex_lock(&adev->grbm_idx_mutex);
2514         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2515                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2516                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2517                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2518                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2519                                                rb_bitmap_width_per_sh);
2520                 }
2521         }
2522         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2523         mutex_unlock(&adev->grbm_idx_mutex);
2524
2525         adev->gfx.config.backend_enable_mask = active_rbs;
2526         adev->gfx.config.num_rbs = hweight32(active_rbs);
2527 }
2528
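/*
 * SH_MEM_BASES holds the top 16 bits of the private (scratch) aperture
 * base in [15:0] and of the shared aperture base in [31:16] (compare the
 * ">> 48" packing in gfx_v9_0_constants_init() below), hence the
 * (base | base << 16) construction used for the compute VMIDs.
 */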
2529 #define DEFAULT_SH_MEM_BASES    (0x6000)
2530 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2531 {
2532         int i;
2533         uint32_t sh_mem_config;
2534         uint32_t sh_mem_bases;
2535
2536         /*
2537          * Configure apertures:
2538          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2539          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2540          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2541          */
2542         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2543
2544         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2545                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2546                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2547
2548         mutex_lock(&adev->srbm_mutex);
2549         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2550                 soc15_grbm_select(adev, 0, 0, 0, i);
2551                 /* CP and shaders */
2552                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2553                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2554         }
2555         soc15_grbm_select(adev, 0, 0, 0, 0);
2556         mutex_unlock(&adev->srbm_mutex);
2557
2558         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2559            access. These should be enabled by FW for target VMIDs. */
2560         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2561                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2562                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2563                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2564                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2565         }
2566 }
2567
2568 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2569 {
2570         int vmid;
2571
2572         /*
2573          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2574          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2575          * the driver can enable them for graphics. VMID0 should maintain
2576          * access so that HWS firmware can save/restore entries.
2577          */
2578         for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2579                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2580                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2581                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2582                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2583         }
2584 }
2585
2586 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2587 {
2588         uint32_t tmp;
2589
2590         switch (adev->asic_type) {
2591         case CHIP_ARCTURUS:
2592                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2593                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2594                                         DISABLE_BARRIER_WAITCNT, 1);
2595                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2596                 break;
2597         default:
2598                 break;
2599         }
2600 }
2601
2602 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2603 {
2604         u32 tmp;
2605         int i;
2606
2607         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2608
2609         gfx_v9_0_tiling_mode_table_init(adev);
2610
2611         gfx_v9_0_setup_rb(adev);
2612         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2613         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2614
2615         /* XXX SH_MEM regs */
2616         /* where to put LDS, scratch, GPUVM in FSA64 space */
2617         mutex_lock(&adev->srbm_mutex);
2618         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2619                 soc15_grbm_select(adev, 0, 0, 0, i);
2620                 /* CP and shaders */
2621                 if (i == 0) {
2622                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2623                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2624                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2625                                             !!adev->gmc.noretry);
2626                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2627                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2628                 } else {
2629                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2630                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2631                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2632                                             !!adev->gmc.noretry);
2633                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2634                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2635                                 (adev->gmc.private_aperture_start >> 48));
2636                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2637                                 (adev->gmc.shared_aperture_start >> 48));
2638                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2639                 }
2640         }
2641         soc15_grbm_select(adev, 0, 0, 0, 0);
2642
2643         mutex_unlock(&adev->srbm_mutex);
2644
2645         gfx_v9_0_init_compute_vmid(adev);
2646         gfx_v9_0_init_gds_vmid(adev);
2647         gfx_v9_0_init_sq_config(adev);
2648 }
2649
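/*
 * Poll until the RLC serdes CU masters of every SE/SH and the non-CU
 * masters all report idle, giving up after adev->usec_timeout
 * microseconds per unit.
 */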
2650 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2651 {
2652         u32 i, j, k;
2653         u32 mask;
2654
2655         mutex_lock(&adev->grbm_idx_mutex);
2656         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2657                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2658                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2659                         for (k = 0; k < adev->usec_timeout; k++) {
2660                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2661                                         break;
2662                                 udelay(1);
2663                         }
2664                         if (k == adev->usec_timeout) {
2665                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2666                                                       0xffffffff, 0xffffffff);
2667                                 mutex_unlock(&adev->grbm_idx_mutex);
2668                                 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2669                                          i, j);
2670                                 return;
2671                         }
2672                 }
2673         }
2674         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2675         mutex_unlock(&adev->grbm_idx_mutex);
2676
2677         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2678                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2679                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2680                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2681         for (k = 0; k < adev->usec_timeout; k++) {
2682                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2683                         break;
2684                 udelay(1);
2685         }
2686 }
2687
2688 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2689                                                bool enable)
2690 {
2691         u32 tmp;
2692
2693         /* These interrupts should be enabled to drive DS clock */
2694
2695         tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2696
2697         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2698         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2699         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2700         if (adev->gfx.num_gfx_rings)
2701                 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2702
2703         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2704 }
2705
2706 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2707 {
2708         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2709         /* csib */
2710         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2711                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2712         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2713                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2714         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2715                         adev->gfx.rlc.clear_state_size);
2716 }
2717
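/*
 * Walk the indirect portion of the RLC register-list-format blob.  From
 * the parsing below, each segment appears to consist of 3-dword entries
 * terminated by a 0xFFFFFFFF sentinel, with the third dword naming an
 * indirect register: those registers are collected into
 * unique_indirect_regs and each segment's start offset is recorded in
 * indirect_start_offsets.
 */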
2718 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2719                                 int indirect_offset,
2720                                 int list_size,
2721                                 int *unique_indirect_regs,
2722                                 int unique_indirect_reg_count,
2723                                 int *indirect_start_offsets,
2724                                 int *indirect_start_offsets_count,
2725                                 int max_start_offsets_count)
2726 {
2727         int idx;
2728
2729         for (; indirect_offset < list_size; indirect_offset++) {
2730                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2731                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2732                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2733
2734                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2735                         indirect_offset += 2;
2736
2737                         /* look for the matching index */
2738                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2739                                 if (unique_indirect_regs[idx] ==
2740                                         register_list_format[indirect_offset] ||
2741                                         !unique_indirect_regs[idx])
2742                                         break;
2743                         }
2744
2745                         BUG_ON(idx >= unique_indirect_reg_count);
2746
2747                         if (!unique_indirect_regs[idx])
2748                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2749
2750                         indirect_offset++;
2751                 }
2752         }
2753 }
2754
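/*
 * Program the RLC save/restore list, which lets the RLC save and restore
 * GFX register state across power gating (a prerequisite for gfxoff).
 * The firmware-provided list has a direct-register prefix followed by
 * indirect segments; see gfx_v9_1_parse_ind_reg_list() above.
 */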
2755 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2756 {
2757         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2758         int unique_indirect_reg_count = 0;
2759
2760         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2761         int indirect_start_offsets_count = 0;
2762
2763         int list_size = 0;
2764         int i = 0, j = 0;
2765         u32 tmp = 0;
2766
2767         u32 *register_list_format =
2768                 kmemdup(adev->gfx.rlc.register_list_format,
2769                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2770         if (!register_list_format)
2771                 return -ENOMEM;
2772
2773         /* setup unique_indirect_regs array and indirect_start_offsets array */
2774         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2775         gfx_v9_1_parse_ind_reg_list(register_list_format,
2776                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2777                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2778                                     unique_indirect_regs,
2779                                     unique_indirect_reg_count,
2780                                     indirect_start_offsets,
2781                                     &indirect_start_offsets_count,
2782                                     ARRAY_SIZE(indirect_start_offsets));
2783
2784         /* enable auto inc in case it is disabled */
2785         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2786         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2787         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2788
2789         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2790         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2791                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2792         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2793                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2794                         adev->gfx.rlc.register_restore[i]);
2795
2796         /* load indirect register */
2797         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2798                 adev->gfx.rlc.reg_list_format_start);
2799
2800         /* direct register portion */
2801         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2802                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2803                         register_list_format[i]);
2804
2805         /* indirect register portion */
2806         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2807                 if (register_list_format[i] == 0xFFFFFFFF) {
2808                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2809                         continue;
2810                 }
2811
2812                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2813                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2814
2815                 for (j = 0; j < unique_indirect_reg_count; j++) {
2816                         if (register_list_format[i] == unique_indirect_regs[j]) {
2817                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2818                                 break;
2819                         }
2820                 }
2821
2822                 BUG_ON(j >= unique_indirect_reg_count);
2823
2824                 i++;
2825         }
2826
2827         /* set save/restore list size */
2828         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2829         list_size = list_size >> 1;
2830         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2831                 adev->gfx.rlc.reg_restore_list_size);
2832         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2833
2834         /* write the starting offsets to RLC scratch ram */
2835         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2836                 adev->gfx.rlc.starting_offsets_start);
2837         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2838                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2839                        indirect_start_offsets[i]);
2840
2841         /* load unique indirect regs */
2842         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2843                 if (unique_indirect_regs[i] != 0) {
2844                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2845                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2846                                unique_indirect_regs[i] & 0x3FFFF);
2847
2848                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2849                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2850                                unique_indirect_regs[i] >> 20);
2851                 }
2852         }
2853
2854         kfree(register_list_format);
2855         return 0;
2856 }
2857
2858 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2859 {
2860         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2861 }
2862
2863 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2864                                              bool enable)
2865 {
2866         uint32_t data = 0;
2867         uint32_t default_data = 0;
2868
2869         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2870         if (enable) {
2871                 /* enable GFXIP control over CGPG */
2872                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2873                 if (default_data != data)
2874                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2875
2876                 /* update status */
2877                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2878                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2879                 if (default_data != data)
2880                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2881         } else {
2882                 /* restore GFXIP control over CGPG */
2883                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2884                 if (default_data != data)
2885                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2886         }
2887 }
2888
2889 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2890 {
2891         uint32_t data = 0;
2892
2893         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2894                               AMD_PG_SUPPORT_GFX_SMG |
2895                               AMD_PG_SUPPORT_GFX_DMG)) {
2896                 /* init IDLE_POLL_COUNT = 60 */
2897                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2898                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2899                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2900                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2901
2902                 /* init RLC PG Delay */
2903                 data = 0;
2904                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2905                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2906                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2907                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2908                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2909
2910                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2911                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2912                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2913                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2914
2915                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2916                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2917                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2918                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2919
2920                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2921                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2922
2923                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2924                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2925                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2926                 if (adev->asic_type != CHIP_RENOIR)
2927                         pwr_10_0_gfxip_control_over_cgpg(adev, true);
2928         }
2929 }
2930
2931 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2932                                                 bool enable)
2933 {
2934         uint32_t data = 0;
2935         uint32_t default_data = 0;
2936
2937         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2938         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2939                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2940                              enable ? 1 : 0);
2941         if (default_data != data)
2942                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2943 }
2944
2945 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2946                                                 bool enable)
2947 {
2948         uint32_t data = 0;
2949         uint32_t default_data = 0;
2950
2951         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2952         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2953                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2954                              enable ? 1 : 0);
2955         if (default_data != data)
2956                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2957 }
2958
2959 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2960                                         bool enable)
2961 {
2962         uint32_t data = 0;
2963         uint32_t default_data = 0;
2964
2965         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2966         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2967                              CP_PG_DISABLE,
2968                              enable ? 0 : 1);
2969         if (default_data != data)
2970                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2971 }
2972
2973 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2974                                                 bool enable)
2975 {
2976         uint32_t data, default_data;
2977
2978         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2979         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2980                              GFX_POWER_GATING_ENABLE,
2981                              enable ? 1 : 0);
2982         if (default_data != data)
2983                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2984 }
2985
2986 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2987                                                 bool enable)
2988 {
2989         uint32_t data, default_data;
2990
2991         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2992         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2993                              GFX_PIPELINE_PG_ENABLE,
2994                              enable ? 1 : 0);
2995         if (default_data != data)
2996                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2997
2998         if (!enable)
2999                 /* read any GFX register to wake up GFX */
3000                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3001 }
3002
3003 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3004                                                        bool enable)
3005 {
3006         uint32_t data, default_data;
3007
3008         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3009         data = REG_SET_FIELD(data, RLC_PG_CNTL,
3010                              STATIC_PER_CU_PG_ENABLE,
3011                              enable ? 1 : 0);
3012         if (default_data != data)
3013                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3014 }
3015
3016 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3017                                                 bool enable)
3018 {
3019         uint32_t data, default_data;
3020
3021         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3022         data = REG_SET_FIELD(data, RLC_PG_CNTL,
3023                              DYN_PER_CU_PG_ENABLE,
3024                              enable ? 1 : 0);
3025         if (default_data != data)
3026                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3027 }
3028
3029 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3030 {
3031         gfx_v9_0_init_csb(adev);
3032
3033         /*
3034          * Rlc save restore list is workable since v2_1.
3035          * And it's needed by gfxoff feature.
3036          */
3037         if (adev->gfx.rlc.is_rlc_v2_1) {
3038                 if (adev->asic_type == CHIP_VEGA12 ||
3039                     (adev->apu_flags & AMD_APU_IS_RAVEN2))
3040                         gfx_v9_1_init_rlc_save_restore_list(adev);
3041                 gfx_v9_0_enable_save_restore_machine(adev);
3042         }
3043
3044         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3045                               AMD_PG_SUPPORT_GFX_SMG |
3046                               AMD_PG_SUPPORT_GFX_DMG |
3047                               AMD_PG_SUPPORT_CP |
3048                               AMD_PG_SUPPORT_GDS |
3049                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
3050                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
3051                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
3052                 gfx_v9_0_init_gfx_power_gating(adev);
3053         }
3054 }
3055
3056 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3057 {
3058         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3059         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3060         gfx_v9_0_wait_for_rlc_serdes(adev);
3061 }
3062
3063 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3064 {
3065         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3066         udelay(50);
3067         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3068         udelay(50);
3069 }
3070
3071 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3072 {
3073 #ifdef AMDGPU_RLC_DEBUG_RETRY
3074         u32 rlc_ucode_ver;
3075 #endif
3076
3077         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3078         udelay(50);
3079
3080         /* on APUs (e.g. carrizo) the CP interrupt is enabled only after the CP is initialized */
3081         if (!(adev->flags & AMD_IS_APU)) {
3082                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3083                 udelay(50);
3084         }
3085
3086 #ifdef AMDGPU_RLC_DEBUG_RETRY
3087         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3088         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3089         if (rlc_ucode_ver == 0x108) {
3090                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3091                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3092                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3093                  * default is 0x9C4 to create a 100us interval */
3094                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3095                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3096                  * to disable the page fault retry interrupts, default is
3097                  * 0x100 (256) */
3098                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3099         }
3100 #endif
3101 }
3102
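/*
 * Legacy (non-PSP) RLC microcode load: RLC_GPM_UCODE_ADDR/_DATA form an
 * auto-incrementing window, so after seeding the start address the image
 * is streamed one dword at a time.
 */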
3103 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3104 {
3105         const struct rlc_firmware_header_v2_0 *hdr;
3106         const __le32 *fw_data;
3107         unsigned i, fw_size;
3108
3109         if (!adev->gfx.rlc_fw)
3110                 return -EINVAL;
3111
3112         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3113         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3114
3115         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3116                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3117         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3118
3119         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3120                         RLCG_UCODE_LOADING_START_ADDRESS);
3121         for (i = 0; i < fw_size; i++)
3122                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3123         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3124
3125         return 0;
3126 }
3127
3128 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3129 {
3130         int r;
3131
3132         if (amdgpu_sriov_vf(adev)) {
3133                 gfx_v9_0_init_csb(adev);
3134                 return 0;
3135         }
3136
3137         adev->gfx.rlc.funcs->stop(adev);
3138
3139         /* disable CG */
3140         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3141
3142         gfx_v9_0_init_pg(adev);
3143
3144         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3145                 /* legacy rlc firmware loading */
3146                 r = gfx_v9_0_rlc_load_microcode(adev);
3147                 if (r)
3148                         return r;
3149         }
3150
3151         switch (adev->asic_type) {
3152         case CHIP_RAVEN:
3153                 if (amdgpu_lbpw == 0)
3154                         gfx_v9_0_enable_lbpw(adev, false);
3155                 else
3156                         gfx_v9_0_enable_lbpw(adev, true);
3157                 break;
3158         case CHIP_VEGA20:
3159                 if (amdgpu_lbpw > 0)
3160                         gfx_v9_0_enable_lbpw(adev, true);
3161                 else
3162                         gfx_v9_0_enable_lbpw(adev, false);
3163                 break;
3164         default:
3165                 break;
3166         }
3167
3168         adev->gfx.rlc.funcs->start(adev);
3169
3170         return 0;
3171 }
3172
3173 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3174 {
3175         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3176
3177         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3178         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3179         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3180         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3181         udelay(50);
3182 }
3183
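/*
 * Legacy (non-PSP) load of the three gfx CP engines: PFP (prefetch
 * parser), CE (constant engine) and ME (micro engine), each streamed
 * through its own auto-incrementing ADDR/DATA register pair.
 */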
3184 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3185 {
3186         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3187         const struct gfx_firmware_header_v1_0 *ce_hdr;
3188         const struct gfx_firmware_header_v1_0 *me_hdr;
3189         const __le32 *fw_data;
3190         unsigned i, fw_size;
3191
3192         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3193                 return -EINVAL;
3194
3195         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3196                 adev->gfx.pfp_fw->data;
3197         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3198                 adev->gfx.ce_fw->data;
3199         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3200                 adev->gfx.me_fw->data;
3201
3202         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3203         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3204         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3205
3206         gfx_v9_0_cp_gfx_enable(adev, false);
3207
3208         /* PFP */
3209         fw_data = (const __le32 *)
3210                 (adev->gfx.pfp_fw->data +
3211                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3212         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3213         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3214         for (i = 0; i < fw_size; i++)
3215                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3216         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3217
3218         /* CE */
3219         fw_data = (const __le32 *)
3220                 (adev->gfx.ce_fw->data +
3221                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3222         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3223         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3224         for (i = 0; i < fw_size; i++)
3225                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3226         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3227
3228         /* ME */
3229         fw_data = (const __le32 *)
3230                 (adev->gfx.me_fw->data +
3231                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3232         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3233         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3234         for (i = 0; i < fw_size; i++)
3235                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3236         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3237
3238         return 0;
3239 }
3240
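/*
 * Start the gfx CP and emit the initial clear state: the golden context
 * register values from gfx9_cs_data are written via SET_CONTEXT_REG
 * packets between PREAMBLE begin/end markers, followed by a CLEAR_STATE
 * packet and the CE partition bases.
 */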
3241 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3242 {
3243         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3244         const struct cs_section_def *sect = NULL;
3245         const struct cs_extent_def *ext = NULL;
3246         int r, i, tmp;
3247
3248         /* init the CP */
3249         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3250         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3251
3252         gfx_v9_0_cp_gfx_enable(adev, true);
3253
3254         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3255         if (r) {
3256                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3257                 return r;
3258         }
3259
3260         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3261         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3262
3263         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3264         amdgpu_ring_write(ring, 0x80000000);
3265         amdgpu_ring_write(ring, 0x80000000);
3266
3267         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3268                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3269                         if (sect->id == SECT_CONTEXT) {
3270                                 amdgpu_ring_write(ring,
3271                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3272                                                ext->reg_count));
3273                                 amdgpu_ring_write(ring,
3274                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3275                                 for (i = 0; i < ext->reg_count; i++)
3276                                         amdgpu_ring_write(ring, ext->extent[i]);
3277                         }
3278                 }
3279         }
3280
3281         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3282         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3283
3284         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3285         amdgpu_ring_write(ring, 0);
3286
3287         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3288         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3289         amdgpu_ring_write(ring, 0x8000);
3290         amdgpu_ring_write(ring, 0x8000);
3291
3292         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3293         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3294                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3295         amdgpu_ring_write(ring, tmp);
3296         amdgpu_ring_write(ring, 0);
3297
3298         amdgpu_ring_commit(ring);
3299
3300         return 0;
3301 }
3302
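/*
 * Program gfx ring buffer 0: size, rptr/wptr writeback addresses, ring
 * base and the doorbell window, then start the ring via
 * gfx_v9_0_cp_gfx_start().
 */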
3303 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3304 {
3305         struct amdgpu_ring *ring;
3306         u32 tmp;
3307         u32 rb_bufsz;
3308         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3309
3310         /* Set the write pointer delay */
3311         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3312
3313         /* set the RB to use vmid 0 */
3314         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3315
3316         /* Set ring buffer size */
3317         ring = &adev->gfx.gfx_ring[0];
3318         rb_bufsz = order_base_2(ring->ring_size / 8);
3319         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3320         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3321 #ifdef __BIG_ENDIAN
3322         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3323 #endif
3324         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3325
3326         /* Initialize the ring buffer's write pointers */
3327         ring->wptr = 0;
3328         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3329         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3330
3331         /* set the wb address whether it's enabled or not */
3332         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3333         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3334         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3335
3336         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3337         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3338         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3339
3340         mdelay(1);
3341         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3342
3343         rb_addr = ring->gpu_addr >> 8;
3344         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3345         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3346
3347         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3348         if (ring->use_doorbell) {
3349                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3350                                     DOORBELL_OFFSET, ring->doorbell_index);
3351                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3352                                     DOORBELL_EN, 1);
3353         } else {
3354                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3355         }
3356         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3357
3358         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3359                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3360         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3361
3362         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3363                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3364
3366         /* start the ring */
3367         gfx_v9_0_cp_gfx_start(adev);
3368         ring->sched.ready = true;
3369
3370         return 0;
3371 }
3372
3373 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3374 {
3375         if (enable) {
3376                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3377         } else {
3378                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3379                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3380                 adev->gfx.kiq.ring.sched.ready = false;
3381         }
3382         udelay(50);
3383 }
3384
3385 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3386 {
3387         const struct gfx_firmware_header_v1_0 *mec_hdr;
3388         const __le32 *fw_data;
3389         unsigned i;
3390         u32 tmp;
3391
3392         if (!adev->gfx.mec_fw)
3393                 return -EINVAL;
3394
3395         gfx_v9_0_cp_compute_enable(adev, false);
3396
3397         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3398         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3399
3400         fw_data = (const __le32 *)
3401                 (adev->gfx.mec_fw->data +
3402                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3403         tmp = 0;
3404         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3405         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3406         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3407
3408         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3409                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3410         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3411                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3412
3413         /* MEC1 */
3414         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3415                          mec_hdr->jt_offset);
3416         for (i = 0; i < mec_hdr->jt_size; i++)
3417                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3418                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3419
3420         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3421                         adev->gfx.mec_fw_version);
3422         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3423
3424         return 0;
3425 }
3426
3427 /* KIQ functions */
3428 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3429 {
3430         uint32_t tmp;
3431         struct amdgpu_device *adev = ring->adev;
3432
3433         /* tell RLC which is KIQ queue */
3434         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3435         tmp &= 0xffffff00;
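	/*
	 * The low byte appears to encode the queue selection: queue in
	 * bits [2:0], pipe in [4:3], me in [6:5]; bit 7 is set afterwards
	 * as an activate/valid flag.
	 */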
3436         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3437         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3438         tmp |= 0x80;
3439         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3440 }
3441
3442 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3443 {
3444         struct amdgpu_device *adev = ring->adev;
3445
3446         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3447                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3448                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3449                         mqd->cp_hqd_queue_priority =
3450                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3451                 }
3452         }
3453 }
3454
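/*
 * Fill in the Memory Queue Descriptor (MQD) for a compute ring.  The CP
 * loads its HQD registers from this structure when the queue is mapped,
 * so most fields below mirror mmCP_HQD_* registers.
 */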
3455 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3456 {
3457         struct amdgpu_device *adev = ring->adev;
3458         struct v9_mqd *mqd = ring->mqd_ptr;
3459         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3460         uint32_t tmp;
3461
3462         mqd->header = 0xC0310800;
3463         mqd->compute_pipelinestat_enable = 0x00000001;
3464         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3465         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3466         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3467         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3468         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3469         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3470         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3471         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3472         mqd->compute_misc_reserved = 0x00000003;
3473
3474         mqd->dynamic_cu_mask_addr_lo =
3475                 lower_32_bits(ring->mqd_gpu_addr
3476                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3477         mqd->dynamic_cu_mask_addr_hi =
3478                 upper_32_bits(ring->mqd_gpu_addr
3479                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3480
3481         eop_base_addr = ring->eop_gpu_addr >> 8;
3482         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3483         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3484
3485         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3486         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3487         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3488                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3489
3490         mqd->cp_hqd_eop_control = tmp;
3491
3492         /* enable doorbell? */
3493         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3494
3495         if (ring->use_doorbell) {
3496                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3497                                     DOORBELL_OFFSET, ring->doorbell_index);
3498                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3499                                     DOORBELL_EN, 1);
3500                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3501                                     DOORBELL_SOURCE, 0);
3502                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3503                                     DOORBELL_HIT, 0);
3504         } else {
3505                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3506                                          DOORBELL_EN, 0);
3507         }
3508
3509         mqd->cp_hqd_pq_doorbell_control = tmp;
3510
3511         /* disable the queue if it's active */
3512         ring->wptr = 0;
3513         mqd->cp_hqd_dequeue_request = 0;
3514         mqd->cp_hqd_pq_rptr = 0;
3515         mqd->cp_hqd_pq_wptr_lo = 0;
3516         mqd->cp_hqd_pq_wptr_hi = 0;
3517
3518         /* set the pointer to the MQD */
3519         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3520         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3521
3522         /* set MQD vmid to 0 */
3523         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3524         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3525         mqd->cp_mqd_control = tmp;
3526
        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3528         hqd_gpu_addr = ring->gpu_addr >> 8;
3529         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3530         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3531
3532         /* set up the HQD, this is similar to CP_RB0_CNTL */
3533         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3534         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3535                             (order_base_2(ring->ring_size / 4) - 1));
3536         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3537                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
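        /*
         * Worked example for QUEUE_SIZE (annotation): a 64 KiB ring buffer
         * holds 65536 / 4 = 16384 dwords, so order_base_2(16384) - 1 = 13
         * is programmed; the field again encodes 2^(n+1) dwords.
         */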
3538 #ifdef __BIG_ENDIAN
3539         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3540 #endif
3541         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3542         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3543         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3544         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3545         mqd->cp_hqd_pq_control = tmp;
3546
3547         /* set the wb address whether it's enabled or not */
3548         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3549         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3550         mqd->cp_hqd_pq_rptr_report_addr_hi =
3551                 upper_32_bits(wb_gpu_addr) & 0xffff;
3552
3553         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3554         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3555         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3556         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
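        /*
         * Note on the masks above (annotation): the low half is forced to
         * dword alignment with 0xfffffffc, and only 16 bits of the upper
         * half are kept, which matches a 48-bit addressable range for
         * these report/poll addresses.
         */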
3557
3558         tmp = 0;
3559         /* enable the doorbell if requested */
3560         if (ring->use_doorbell) {
3561                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3562                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3563                                 DOORBELL_OFFSET, ring->doorbell_index);
3564
3565                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3566                                          DOORBELL_EN, 1);
3567                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3568                                          DOORBELL_SOURCE, 0);
3569                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3570                                          DOORBELL_HIT, 0);
3571         }
3572
3573         mqd->cp_hqd_pq_doorbell_control = tmp;
3574
3575         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3576         ring->wptr = 0;
3577         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3578
3579         /* set the vmid for the queue */
3580         mqd->cp_hqd_vmid = 0;
3581
3582         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3583         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3584         mqd->cp_hqd_persistent_state = tmp;
3585
3586         /* set MIN_IB_AVAIL_SIZE */
3587         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3588         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3589         mqd->cp_hqd_ib_control = tmp;
3590
3591         /* set static priority for a queue/ring */
3592         gfx_v9_0_mqd_set_priority(ring, mqd);
3593         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3594
        /* the map_queues packet doesn't need to activate the queue,
         * so only the KIQ needs to set this field.
         */
3598         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3599                 mqd->cp_hqd_active = 1;
3600
3601         return 0;
3602 }
3603
3604 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3605 {
3606         struct amdgpu_device *adev = ring->adev;
3607         struct v9_mqd *mqd = ring->mqd_ptr;
3608         int j;
3609
3610         /* disable wptr polling */
3611         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3612
3613         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3614                mqd->cp_hqd_eop_base_addr_lo);
3615         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3616                mqd->cp_hqd_eop_base_addr_hi);
3617
3618         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3619         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3620                mqd->cp_hqd_eop_control);
3621
3622         /* enable doorbell? */
3623         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3624                mqd->cp_hqd_pq_doorbell_control);
3625
3626         /* disable the queue if it's active */
3627         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3628                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3629                 for (j = 0; j < adev->usec_timeout; j++) {
3630                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3631                                 break;
3632                         udelay(1);
3633                 }
3634                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3635                        mqd->cp_hqd_dequeue_request);
3636                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3637                        mqd->cp_hqd_pq_rptr);
3638                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3639                        mqd->cp_hqd_pq_wptr_lo);
3640                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3641                        mqd->cp_hqd_pq_wptr_hi);
3642         }
3643
3644         /* set the pointer to the MQD */
3645         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3646                mqd->cp_mqd_base_addr_lo);
3647         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3648                mqd->cp_mqd_base_addr_hi);
3649
3650         /* set MQD vmid to 0 */
3651         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3652                mqd->cp_mqd_control);
3653
        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3655         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3656                mqd->cp_hqd_pq_base_lo);
3657         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3658                mqd->cp_hqd_pq_base_hi);
3659
3660         /* set up the HQD, this is similar to CP_RB0_CNTL */
3661         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3662                mqd->cp_hqd_pq_control);
3663
3664         /* set the wb address whether it's enabled or not */
3665         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3666                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3667         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3668                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3669
3670         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3671         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3672                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3673         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3674                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3675
3676         /* enable the doorbell if requested */
3677         if (ring->use_doorbell) {
3678                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3679                                         (adev->doorbell_index.kiq * 2) << 2);
3680                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3681                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3682         }
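        /*
         * Doorbell range arithmetic (annotation): each 64-bit doorbell
         * occupies two dwords, so index * 2 is the dword index and
         * (index * 2) << 2 the byte offset, i.e. byte offset = index * 8.
         */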
3683
3684         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3685                mqd->cp_hqd_pq_doorbell_control);
3686
3687         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3688         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3689                mqd->cp_hqd_pq_wptr_lo);
3690         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3691                mqd->cp_hqd_pq_wptr_hi);
3692
3693         /* set the vmid for the queue */
3694         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3695
3696         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3697                mqd->cp_hqd_persistent_state);
3698
3699         /* activate the queue */
3700         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3701                mqd->cp_hqd_active);
3702
3703         if (ring->use_doorbell)
3704                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3705
3706         return 0;
3707 }
3708
3709 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3710 {
3711         struct amdgpu_device *adev = ring->adev;
3712         int j;
3713
3714         /* disable the queue if it's active */
3715         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3716
3717                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3718
3719                 for (j = 0; j < adev->usec_timeout; j++) {
3720                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3721                                 break;
3722                         udelay(1);
3723                 }
3724
                if (j == adev->usec_timeout) {
3726                         DRM_DEBUG("KIQ dequeue request failed.\n");
3727
3728                         /* Manual disable if dequeue request times out */
3729                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3730                 }
3731
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
3734         }
3735
3736         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3737         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3738         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3739         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3740         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3741         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3742         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3743         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3744
3745         return 0;
3746 }
3747
3748 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3749 {
3750         struct amdgpu_device *adev = ring->adev;
3751         struct v9_mqd *mqd = ring->mqd_ptr;
3752         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3753         struct v9_mqd *tmp_mqd;
3754
3755         gfx_v9_0_kiq_setting(ring);
3756
        /* The GPU could be in a bad state during probe: the driver triggers
         * the reset after loading the SMU, and in that case the MQD has not
         * been initialized yet, so the driver needs to re-init it. Check
         * mqd->cp_hqd_pq_control, since a properly initialized MQD never
         * has that field at 0.
         */
3762         tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
        if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
3764                 /* for GPU_RESET case , reset MQD to a clean status */
3765                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3766                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3767
3768                 /* reset ring buffer */
3769                 ring->wptr = 0;
3770                 amdgpu_ring_clear_ring(ring);
3771
3772                 mutex_lock(&adev->srbm_mutex);
3773                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3774                 gfx_v9_0_kiq_init_register(ring);
3775                 soc15_grbm_select(adev, 0, 0, 0, 0);
3776                 mutex_unlock(&adev->srbm_mutex);
3777         } else {
3778                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3779                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3780                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3781                 mutex_lock(&adev->srbm_mutex);
3782                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3783                 gfx_v9_0_mqd_init(ring);
3784                 gfx_v9_0_kiq_init_register(ring);
3785                 soc15_grbm_select(adev, 0, 0, 0, 0);
3786                 mutex_unlock(&adev->srbm_mutex);
3787
3788                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3789                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3790         }
3791
3792         return 0;
3793 }
3794
3795 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3796 {
3797         struct amdgpu_device *adev = ring->adev;
3798         struct v9_mqd *mqd = ring->mqd_ptr;
3799         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3800         struct v9_mqd *tmp_mqd;
3801
        /* Same as the KIQ init above: the driver needs to re-init the MQD if
         * mqd->cp_hqd_pq_control shows that it was never initialized.
         */
3805         tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3806
3807         if (!tmp_mqd->cp_hqd_pq_control ||
3808             (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
3809                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3810                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3811                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3812                 mutex_lock(&adev->srbm_mutex);
3813                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3814                 gfx_v9_0_mqd_init(ring);
3815                 soc15_grbm_select(adev, 0, 0, 0, 0);
3816                 mutex_unlock(&adev->srbm_mutex);
3817
3818                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3819                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3820         } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3821                 /* reset MQD to a clean status */
3822                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3823                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3824
3825                 /* reset ring buffer */
3826                 ring->wptr = 0;
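                /* clear the 64-bit wptr shadow in write-back memory so the
                 * restored queue does not pick up a stale write pointer
                 */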
3827                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3828                 amdgpu_ring_clear_ring(ring);
3829         } else {
3830                 amdgpu_ring_clear_ring(ring);
3831         }
3832
3833         return 0;
3834 }
3835
3836 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3837 {
3838         struct amdgpu_ring *ring;
3839         int r;
3840
3841         ring = &adev->gfx.kiq.ring;
3842
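        /*
         * Buffer-object access follows the usual reserve -> kmap -> use ->
         * kunmap -> unreserve protocol; the MQD is only mapped for the
         * duration of the (re)initialization below.
         */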
3843         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3844         if (unlikely(r != 0))
3845                 return r;
3846
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
        if (unlikely(r != 0)) {
                /* drop the reservation taken above instead of leaking it */
                amdgpu_bo_unreserve(ring->mqd_obj);
                return r;
        }
3850
3851         gfx_v9_0_kiq_init_queue(ring);
3852         amdgpu_bo_kunmap(ring->mqd_obj);
3853         ring->mqd_ptr = NULL;
3854         amdgpu_bo_unreserve(ring->mqd_obj);
3855         ring->sched.ready = true;
3856         return 0;
3857 }
3858
3859 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3860 {
3861         struct amdgpu_ring *ring = NULL;
3862         int r = 0, i;
3863
3864         gfx_v9_0_cp_compute_enable(adev, true);
3865
3866         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3867                 ring = &adev->gfx.compute_ring[i];
3868
3869                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3870                 if (unlikely(r != 0))
3871                         goto done;
3872                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3873                 if (!r) {
3874                         r = gfx_v9_0_kcq_init_queue(ring);
3875                         amdgpu_bo_kunmap(ring->mqd_obj);
3876                         ring->mqd_ptr = NULL;
3877                 }
3878                 amdgpu_bo_unreserve(ring->mqd_obj);
3879                 if (r)
3880                         goto done;
3881         }
3882
3883         r = amdgpu_gfx_enable_kcq(adev);
3884 done:
3885         return r;
3886 }
3887
3888 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3889 {
3890         int r, i;
3891         struct amdgpu_ring *ring;
3892
3893         if (!(adev->flags & AMD_IS_APU))
3894                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3895
3896         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3897                 if (adev->gfx.num_gfx_rings) {
3898                         /* legacy firmware loading */
3899                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3900                         if (r)
3901                                 return r;
3902                 }
3903
3904                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3905                 if (r)
3906                         return r;
3907         }
3908
3909         r = gfx_v9_0_kiq_resume(adev);
3910         if (r)
3911                 return r;
3912
3913         if (adev->gfx.num_gfx_rings) {
3914                 r = gfx_v9_0_cp_gfx_resume(adev);
3915                 if (r)
3916                         return r;
3917         }
3918
3919         r = gfx_v9_0_kcq_resume(adev);
3920         if (r)
3921                 return r;
3922
3923         if (adev->gfx.num_gfx_rings) {
3924                 ring = &adev->gfx.gfx_ring[0];
3925                 r = amdgpu_ring_test_helper(ring);
3926                 if (r)
3927                         return r;
3928         }
3929
3930         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3931                 ring = &adev->gfx.compute_ring[i];
3932                 amdgpu_ring_test_helper(ring);
3933         }
3934
3935         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3936
3937         return 0;
3938 }
3939
3940 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3941 {
3942         u32 tmp;
3943
3944         if (adev->asic_type != CHIP_ARCTURUS)
3945                 return;
3946
3947         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3948         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3949                                 adev->df.hash_status.hash_64k);
3950         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3951                                 adev->df.hash_status.hash_2m);
3952         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3953                                 adev->df.hash_status.hash_1g);
3954         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3955 }
3956
3957 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3958 {
3959         if (adev->gfx.num_gfx_rings)
3960                 gfx_v9_0_cp_gfx_enable(adev, enable);
3961         gfx_v9_0_cp_compute_enable(adev, enable);
3962 }
3963
3964 static int gfx_v9_0_hw_init(void *handle)
3965 {
3966         int r;
3967         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3968
3969         if (!amdgpu_sriov_vf(adev))
3970                 gfx_v9_0_init_golden_registers(adev);
3971
3972         gfx_v9_0_constants_init(adev);
3973
3974         gfx_v9_0_init_tcp_config(adev);
3975
3976         r = adev->gfx.rlc.funcs->resume(adev);
3977         if (r)
3978                 return r;
3979
3980         r = gfx_v9_0_cp_resume(adev);
3981         if (r)
3982                 return r;
3983
3984         if (adev->asic_type == CHIP_ALDEBARAN)
3985                 gfx_v9_4_2_set_power_brake_sequence(adev);
3986
3987         return r;
3988 }
3989
3990 static int gfx_v9_0_hw_fini(void *handle)
3991 {
3992         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3993
3994         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3995         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3996         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3997
        /* DF freeze and KCQ disable would fail if a RAS fatal error
         * interrupt has been triggered, so skip them in that case */
        if (!amdgpu_ras_intr_triggered())
                /* disable the KCQs so the CPC stops touching memory that
                 * is no longer valid */
                amdgpu_gfx_disable_kcq(adev);
4002
4003         if (amdgpu_sriov_vf(adev)) {
4004                 gfx_v9_0_cp_gfx_enable(adev, false);
                /* Polling must be disabled for SRIOV once the hw is finished;
                 * otherwise the CPC engine may keep fetching a WB address that
                 * is no longer valid after sw teardown, triggering a DMAR read
                 * error on the hypervisor side.
                 */
4010                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4011                 return 0;
4012         }
4013
        /* Use the deinitialize sequence from CAIL when unbinding the device
         * from the driver, otherwise the KIQ hangs when binding back.
         */
4017         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4018                 mutex_lock(&adev->srbm_mutex);
4019                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
4020                                 adev->gfx.kiq.ring.pipe,
4021                                 adev->gfx.kiq.ring.queue, 0);
4022                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
4023                 soc15_grbm_select(adev, 0, 0, 0, 0);
4024                 mutex_unlock(&adev->srbm_mutex);
4025         }
4026
4027         gfx_v9_0_cp_enable(adev, false);
4028         adev->gfx.rlc.funcs->stop(adev);
4029
4030         return 0;
4031 }
4032
4033 static int gfx_v9_0_suspend(void *handle)
4034 {
4035         return gfx_v9_0_hw_fini(handle);
4036 }
4037
4038 static int gfx_v9_0_resume(void *handle)
4039 {
4040         return gfx_v9_0_hw_init(handle);
4041 }
4042
4043 static bool gfx_v9_0_is_idle(void *handle)
4044 {
4045         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4046
        return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
                              GRBM_STATUS, GUI_ACTIVE);
4052 }
4053
4054 static int gfx_v9_0_wait_for_idle(void *handle)
4055 {
4056         unsigned i;
4057         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4058
4059         for (i = 0; i < adev->usec_timeout; i++) {
4060                 if (gfx_v9_0_is_idle(handle))
4061                         return 0;
4062                 udelay(1);
4063         }
4064         return -ETIMEDOUT;
4065 }
4066
4067 static int gfx_v9_0_soft_reset(void *handle)
4068 {
4069         u32 grbm_soft_reset = 0;
4070         u32 tmp;
4071         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4072
4073         /* GRBM_STATUS */
4074         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4075         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4076                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4077                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4078                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4079                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4080                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4081                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4082                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4083                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4084                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4085         }
4086
4087         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4088                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4089                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4090         }
4091
4092         /* GRBM_STATUS2 */
4093         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4094         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4095                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4096                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4097
4099         if (grbm_soft_reset) {
4100                 /* stop the rlc */
4101                 adev->gfx.rlc.funcs->stop(adev);
4102
4103                 if (adev->gfx.num_gfx_rings)
4104                         /* Disable GFX parsing/prefetching */
4105                         gfx_v9_0_cp_gfx_enable(adev, false);
4106
4107                 /* Disable MEC parsing/prefetching */
4108                 gfx_v9_0_cp_compute_enable(adev, false);
4109
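                /* Annotation: standard soft-reset pulse: set the selected
                 * reset bits, read back to post the write, hold for 50 us,
                 * then clear the bits and read back again.
                 */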
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4123
4124                 /* Wait a little for things to settle down */
4125                 udelay(50);
4126         }
4127         return 0;
4128 }
4129
4130 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4131 {
4132         signed long r, cnt = 0;
4133         unsigned long flags;
4134         uint32_t seq, reg_val_offs = 0;
4135         uint64_t value = 0;
4136         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4137         struct amdgpu_ring *ring = &kiq->ring;
4138
4139         BUG_ON(!ring->funcs->emit_rreg);
4140
4141         spin_lock_irqsave(&kiq->ring_lock, flags);
4142         if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4143                 pr_err("critical bug! too many kiq readers\n");
4144                 goto failed_unlock;
4145         }
4146         amdgpu_ring_alloc(ring, 32);
4147         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 9 |     /* src: gpu clock count */
4149                                 (5 << 8) |      /* dst: memory */
4150                                 (1 << 16) |     /* count sel */
4151                                 (1 << 20));     /* write confirm */
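        /*
         * Worked example (annotation): the control dword above evaluates to
         * 9 | (5 << 8) | (1 << 16) | (1 << 20) = 0x110509, i.e. source
         * select 9 (GPU clock counter), destination 5 (memory), a 64-bit
         * count and write confirmation; the two zero dwords that follow are
         * the unused source address fields.
         */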
4152         amdgpu_ring_write(ring, 0);
4153         amdgpu_ring_write(ring, 0);
4154         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4155                                 reg_val_offs * 4));
4156         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4157                                 reg_val_offs * 4));
4158         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4159         if (r)
4160                 goto failed_undo;
4161
4162         amdgpu_ring_commit(ring);
4163         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4164
4165         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4166
        /* Don't wait any longer in the GPU-reset case, because doing so can
         * block the gpu_recover() routine forever: this read may be triggered
         * from TTM (as with virt_kiq_rreg), and ttm_bo_lock_delayed_workqueue()
         * then never returns while we keep waiting here, hanging gpu_recover().
         */
        if (r < 1 && amdgpu_in_reset(adev))
4176                 goto failed_kiq_read;
4177
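        /* the retry loop below msleep()s, so this function must only be
         * reached from sleepable task context; might_sleep() documents that
         * and catches atomic-context callers at runtime
         */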
4178         might_sleep();
4179         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4180                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4181                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4182         }
4183
4184         if (cnt > MAX_KIQ_REG_TRY)
4185                 goto failed_kiq_read;
4186
4187         mb();
4188         value = (uint64_t)adev->wb.wb[reg_val_offs] |
                (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4190         amdgpu_device_wb_free(adev, reg_val_offs);
4191         return value;
4192
4193 failed_undo:
4194         amdgpu_ring_undo(ring);
4195 failed_unlock:
4196         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4197 failed_kiq_read:
4198         if (reg_val_offs)
4199                 amdgpu_device_wb_free(adev, reg_val_offs);
4200         pr_err("failed to read gpu clock\n");
4201         return ~0;
4202 }
4203
4204 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4205 {
4206         uint64_t clock;
4207
4208         amdgpu_gfx_off_ctrl(adev, false);
4209         mutex_lock(&adev->gfx.gpu_clock_mutex);
4210         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4211                 clock = gfx_v9_0_kiq_read_clock(adev);
4212         } else {
4213                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
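                /* writing 1 presumably latches the free-running counter so
                 * that the LSB/MSB pair read below is coherent
                 */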
4214                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4215                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4216         }
4217         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4218         amdgpu_gfx_off_ctrl(adev, true);
4219         return clock;
4220 }
4221
4222 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4223                                           uint32_t vmid,
4224                                           uint32_t gds_base, uint32_t gds_size,
4225                                           uint32_t gws_base, uint32_t gws_size,
4226                                           uint32_t oa_base, uint32_t oa_size)
4227 {
4228         struct amdgpu_device *adev = ring->adev;
4229
4230         /* GDS Base */
4231         gfx_v9_0_write_data_to_reg(ring, 0, false,
4232                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4233                                    gds_base);
4234
4235         /* GDS Size */
4236         gfx_v9_0_write_data_to_reg(ring, 0, false,
4237                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4238                                    gds_size);
4239
4240         /* GWS */
4241         gfx_v9_0_write_data_to_reg(ring, 0, false,
4242                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4243                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4244
4245         /* OA */
4246         gfx_v9_0_write_data_to_reg(ring, 0, false,
4247                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4248                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
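        /*
         * Worked example for the OA mask (annotation): with oa_base = 4 and
         * oa_size = 2, (1 << 6) - (1 << 4) = 0x30, i.e. a contiguous mask
         * covering bits 4-5.
         */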
4249 }
4250
4251 static const u32 vgpr_init_compute_shader[] =
4252 {
4253         0xb07c0000, 0xbe8000ff,
4254         0x000000f8, 0xbf110800,
4255         0x7e000280, 0x7e020280,
4256         0x7e040280, 0x7e060280,
4257         0x7e080280, 0x7e0a0280,
4258         0x7e0c0280, 0x7e0e0280,
4259         0x80808800, 0xbe803200,
4260         0xbf84fff5, 0xbf9c0000,
4261         0xd28c0001, 0x0001007f,
4262         0xd28d0001, 0x0002027e,
4263         0x10020288, 0xb8810904,
4264         0xb7814000, 0xd1196a01,
4265         0x00000301, 0xbe800087,
4266         0xbefc00c1, 0xd89c4000,
4267         0x00020201, 0xd89cc080,
4268         0x00040401, 0x320202ff,
4269         0x00000800, 0x80808100,
4270         0xbf84fff8, 0x7e020280,
4271         0xbf810000, 0x00000000,
4272 };
4273
4274 static const u32 sgpr_init_compute_shader[] =
4275 {
4276         0xb07c0000, 0xbe8000ff,
4277         0x0000005f, 0xbee50080,
4278         0xbe812c65, 0xbe822c65,
4279         0xbe832c65, 0xbe842c65,
4280         0xbe852c65, 0xb77c0005,
4281         0x80808500, 0xbf84fff8,
4282         0xbe800080, 0xbf810000,
4283 };
4284
4285 static const u32 vgpr_init_compute_shader_arcturus[] = {
4286         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4287         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4288         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4289         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4290         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4291         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4292         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4293         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4294         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4295         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4296         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4297         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4298         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4299         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4300         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4301         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4302         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4303         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4304         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4305         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4306         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4307         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4308         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4309         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4310         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4311         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4312         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4313         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4314         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4315         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4316         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4317         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4318         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4319         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4320         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4321         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4322         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4323         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4324         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4325         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4326         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4327         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4328         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4329         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4330         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4331         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4332         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4333         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4334         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4335         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4336         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4337         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4338         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4339         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4340         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4341         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4342         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4343         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4344         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4345         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4346         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4347         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4348         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4349         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4350         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4351         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4352         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4353         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4354         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4355         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4356         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4357         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4358         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4359         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4360         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4361         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4362         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4363         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4364         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4365         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4366         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4367         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4368         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4369         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4370         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4371         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4372         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4373         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4374         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4375         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4376         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4377         0xbf84fff8, 0xbf810000,
4378 };
4379
/* When the register arrays below are changed, please update gpr_reg_size
 * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds so that
 * they cover all gfx9 ASICs.
 */
4383 static const struct soc15_reg_entry vgpr_init_regs[] = {
4384    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4385    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4386    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4387    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4388    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4389    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4390    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4391    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4392    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4393    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4394    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4395    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4396    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4397    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4398 };
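/*
 * Annotation (assuming the usual COMPUTE_PGM_RSRC1 layout): 0x3f in the
 * VGPR-count field encodes (0x3f + 1) * 4 = 256 VGPRs per thread, i.e. the
 * whole VGPR file, which is what a full ECC scrub of the register file wants.
 */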
4399
4400 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4401    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4402    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4403    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4404    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4405    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4406    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4407    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4408    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4409    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4410    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4411    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4412    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4413    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4414    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4415 };
4416
4417 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4418    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4419    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4420    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4421    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4422    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4423    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4424    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4425    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4426    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4427    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4428    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4429    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4430    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4431    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4432 };
4433
4434 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4435    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4436    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4437    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4438    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4439    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4440    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4441    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4442    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4443    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4444    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4445    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4446    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4447    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4448    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4449 };
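/*
 * Annotation: the COMPUTE_STATIC_THREAD_MGMT_SE* values appear to be per-SE
 * CU enable masks; sgpr1 (0x000000ff) targets CUs 0-7 and sgpr2 (0x0000ff00)
 * CUs 8-15, so the two SGPR passes together cover all CUs of each SE.
 */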
4450
4451 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4452    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4453    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4454    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4455    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4456    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4457    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4458    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4459    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4460    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4461    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4462    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4463    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4464    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4465    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4466    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4467    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4468    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4469    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4470    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4471    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4472    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4473    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4474    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4475    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4476    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4477    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4478    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4479    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4480    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4481    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4482    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4483    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4484    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4485 };
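/*
 * Annotation: the trailing initializers after each SOC15_REG_ENTRY are
 * apparently reg_value, se_num and instance count, so e.g. { ..., 0, 4, 16 }
 * means the counter is read across 4 shader engines and 16 instances.
 */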
4486
4487 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4488 {
4489         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4490         int i, r;
4491
        /* only supported when RAS is enabled */
4493         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4494                 return 0;
4495
4496         r = amdgpu_ring_alloc(ring, 7);
4497         if (r) {
4498                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4499                         ring->name, r);
4500                 return r;
4501         }
4502
4503         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4504         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4505
4506         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4507         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4508                                 PACKET3_DMA_DATA_DST_SEL(1) |
4509                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4510                                 PACKET3_DMA_DATA_ENGINE(0)));
4511         amdgpu_ring_write(ring, 0);
4512         amdgpu_ring_write(ring, 0);
4513         amdgpu_ring_write(ring, 0);
4514         amdgpu_ring_write(ring, 0);
4515         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4516                                 adev->gds.gds_size);
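        /*
         * Annotation: DST_SEL(1) appears to target GDS and SRC_SEL(2) to take
         * the data from the packet itself, so this DMA fills the whole
         * gds_size aperture with zeroes; RAW_WAIT presumably enforces
         * read-after-write ordering against prior writes.
         */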
4517
4518         amdgpu_ring_commit(ring);
4519
4520         for (i = 0; i < adev->usec_timeout; i++) {
4521                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4522                         break;
4523                 udelay(1);
4524         }
4525
4526         if (i >= adev->usec_timeout)
4527                 r = -ETIMEDOUT;
4528
4529         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4530
4531         return r;
4532 }
4533
4534 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4535 {
4536         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4537         struct amdgpu_ib ib;
4538         struct dma_fence *f = NULL;
4539         int r, i;
4540         unsigned total_size, vgpr_offset, sgpr_offset;
4541         u64 gpu_addr;
4542
4543         int compute_dim_x = adev->gfx.config.max_shader_engines *
4544                                                 adev->gfx.config.max_cu_per_sh *
4545                                                 adev->gfx.config.max_sh_per_se;
4546         int sgpr_work_group_size = 5;
4547         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4548         int vgpr_init_shader_size;
4549         const u32 *vgpr_init_shader_ptr;
4550         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4551
        /* only supported when RAS is enabled */
4553         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4554                 return 0;
4555
4556         /* bail if the compute ring is not ready */
4557         if (!ring->sched.ready)
4558                 return 0;
4559
4560         if (adev->asic_type == CHIP_ARCTURUS ||
4561             adev->asic_type == CHIP_ALDEBARAN) {
4562                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4563                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4564                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4565         } else {
4566                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4567                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4568                 vgpr_init_regs_ptr = vgpr_init_regs;
4569         }
4570
4571         total_size =
4572                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4573         total_size +=
4574                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4575         total_size +=
4576                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4577         total_size = ALIGN(total_size, 256);
4578         vgpr_offset = total_size;
4579         total_size += ALIGN(vgpr_init_shader_size, 256);
4580         sgpr_offset = total_size;
4581         total_size += sizeof(sgpr_init_compute_shader);
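        /*
         * Size accounting (annotation): per GPR section, gpr_reg_size
         * registers * 3 dwords each (SET_SH_REG header, offset, value),
         * plus 4 dwords for the COMPUTE_PGM_LO/HI write, 5 for the
         * dispatch packet and 2 for the CS partial flush, times 4 bytes.
         */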
4582
4583         /* allocate an indirect buffer to put the commands in */
4584         memset(&ib, 0, sizeof(ib));
4585         r = amdgpu_ib_get(adev, NULL, total_size,
4586                                         AMDGPU_IB_POOL_DIRECT, &ib);
4587         if (r) {
4588                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4589                 return r;
4590         }
4591
4592         /* load the compute shaders */
4593         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4594                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4595
4596         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4597                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4598
4599         /* init the ib length to 0 */
4600         ib.length_dw = 0;
4601
4602         /* VGPR */
4603         /* write the register state for the compute dispatch */
4604         for (i = 0; i < gpr_reg_size; i++) {
4605                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4606                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4607                                                                 - PACKET3_SET_SH_REG_START;
4608                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4609         }
4610         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4611         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4612         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4613         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4614                                                         - PACKET3_SET_SH_REG_START;
4615         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4616         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4617
4618         /* write dispatch packet */
4619         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4620         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4621         ib.ptr[ib.length_dw++] = 1; /* y */
4622         ib.ptr[ib.length_dw++] = 1; /* z */
4623         ib.ptr[ib.length_dw++] =
4624                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4625
4626         /* write CS partial flush packet */
4627         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4628         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4629
4630         /* SGPR1 */
4631         /* write the register state for the compute dispatch */
4632         for (i = 0; i < gpr_reg_size; i++) {
4633                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4634                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4635                                                                 - PACKET3_SET_SH_REG_START;
4636                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4637         }
4638         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4639         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4640         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4641         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4642                                                         - PACKET3_SET_SH_REG_START;
4643         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4644         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4645
4646         /* write dispatch packet */
4647         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4648         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4649         ib.ptr[ib.length_dw++] = 1; /* y */
4650         ib.ptr[ib.length_dw++] = 1; /* z */
4651         ib.ptr[ib.length_dw++] =
4652                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4653
4654         /* write CS partial flush packet */
4655         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4656         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4657
4658         /* SGPR2 */
4659         /* write the register state for the compute dispatch */
4660         for (i = 0; i < gpr_reg_size; i++) {
4661                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4662                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4663                                                                 - PACKET3_SET_SH_REG_START;
4664                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4665         }
4666         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4667         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4668         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4669         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4670                                                         - PACKET3_SET_SH_REG_START;
4671         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4672         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4673
4674         /* write dispatch packet */
4675         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4676         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4677         ib.ptr[ib.length_dw++] = 1; /* y */
4678         ib.ptr[ib.length_dw++] = 1; /* z */
4679         ib.ptr[ib.length_dw++] =
4680                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4681
4682         /* write CS partial flush packet */
4683         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4684         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4685
4686         /* schedule the ib on the ring */
4687         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4688         if (r) {
4689                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4690                 goto fail;
4691         }
4692
4693         /* wait for the GPU to finish processing the IB */
4694         r = dma_fence_wait(f, false);
4695         if (r) {
4696                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4697                 goto fail;
4698         }
4699
4700 fail:
4701         amdgpu_ib_free(adev, &ib, NULL);
4702         dma_fence_put(f);
4703
4704         return r;
4705 }
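
/*
 * Illustrative sketch (not executed by the driver): layout of the PM4
 * stream built above.  The PACKET3 count field is one less than the
 * number of payload dwords that follow the header, so each register
 * write and each dispatch decode as:
 *
 *   PACKET3(PACKET3_SET_SH_REG, 1)        header
 *   reg - PACKET3_SET_SH_REG_START        SH register index (1 dword)
 *   value                                 data written (1 dword)
 *
 *   PACKET3(PACKET3_DISPATCH_DIRECT, 3)   header
 *   dim_x, dim_y, dim_z, initiator        4 payload dwords
 *
 * The shader start address is programmed as a 256-byte-aligned value,
 * which is why ib.gpu_addr + offset is shifted right by 8 before being
 * split into mmCOMPUTE_PGM_LO/HI.
 */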
4706
4707 static int gfx_v9_0_early_init(void *handle)
4708 {
4709         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4710
4711         if (adev->asic_type == CHIP_ARCTURUS ||
4712             adev->asic_type == CHIP_ALDEBARAN)
4713                 adev->gfx.num_gfx_rings = 0;
4714         else
4715                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4716         adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4717                                           AMDGPU_MAX_COMPUTE_RINGS);
4718         gfx_v9_0_set_kiq_pm4_funcs(adev);
4719         gfx_v9_0_set_ring_funcs(adev);
4720         gfx_v9_0_set_irq_funcs(adev);
4721         gfx_v9_0_set_gds_init(adev);
4722         gfx_v9_0_set_rlc_funcs(adev);
4723
4724         return 0;
4725 }
4726
4727 static int gfx_v9_0_ecc_late_init(void *handle)
4728 {
4729         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4730         int r;
4731
4732         /*
4733          * Temporary workaround: on several cards the CP firmware fails to
4734          * update the read pointer while CPDMA writes the GDS clearing
4735          * operation during the suspend/resume sequence, so limit this
4736          * operation to the cold-boot sequence.
4737          */
4738         if ((!adev->in_suspend) &&
4739             (adev->gds.gds_size)) {
4740                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4741                 if (r)
4742                         return r;
4743         }
4744
4745         /* requires IBs so do in late init after IB pool is initialized */
4746         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4747         if (r)
4748                 return r;
4749
4750         r = amdgpu_gfx_ras_late_init(adev);
4751         if (r)
4752                 return r;
4753
4754         if (adev->gfx.funcs->enable_watchdog_timer)
4755                 adev->gfx.funcs->enable_watchdog_timer(adev);
4756
4757         return 0;
4758 }
4759
4760 static int gfx_v9_0_late_init(void *handle)
4761 {
4762         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4763         int r;
4764
4765         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4766         if (r)
4767                 return r;
4768
4769         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4770         if (r)
4771                 return r;
4772
4773         r = gfx_v9_0_ecc_late_init(handle);
4774         if (r)
4775                 return r;
4776
4777         return 0;
4778 }
4779
4780 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4781 {
4782         uint32_t rlc_setting;
4783
4784         /* if RLC is not enabled, do nothing */
4785         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4786         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4787                 return false;
4788
4789         return true;
4790 }
4791
4792 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4793 {
4794         uint32_t data;
4795         unsigned i;
4796
4797         data = RLC_SAFE_MODE__CMD_MASK;
4798         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4799         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4800
4801         /* wait for RLC_SAFE_MODE */
4802         for (i = 0; i < adev->usec_timeout; i++) {
4803                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4804                         break;
4805                 udelay(1);
4806         }
4807 }
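
/*
 * A worked example, assuming the gc_9_0 layout where CMD occupies bit 0
 * of RLC_SAFE_MODE and MESSAGE starts at bit 1 (an assumption; verify
 * against the sh_mask header): the request written above is
 *
 *   RLC_SAFE_MODE__CMD_MASK | (1 << RLC_SAFE_MODE__MESSAGE__SHIFT) == 0x3
 *
 * i.e. "command valid, message = 1 (enter safe mode)".  The RLC clears
 * the CMD bit once it has consumed the message, which is what the
 * polling loop above waits for (up to adev->usec_timeout microseconds).
 */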
4808
4809 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4810 {
4811         uint32_t data;
4812
4813         data = RLC_SAFE_MODE__CMD_MASK;
4814         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4815 }
4816
4817 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4818                                                 bool enable)
4819 {
4820         amdgpu_gfx_rlc_enter_safe_mode(adev);
4821
4822         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4823                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4824                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4825                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4826         } else {
4827                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4828                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4829                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4830         }
4831
4832         amdgpu_gfx_rlc_exit_safe_mode(adev);
4833 }
4834
4835 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4836                                                 bool enable)
4837 {
4838         /* TODO: double-check whether this needs to run under RLC safe mode */
4839         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4840
4841         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4842                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4843         else
4844                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4845
4846         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4847                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4848         else
4849                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4850
4851         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4852 }
4853
4854 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4855                                                       bool enable)
4856 {
4857         uint32_t data, def;
4858
4859         amdgpu_gfx_rlc_enter_safe_mode(adev);
4860
4861         /* It is disabled by HW by default */
4862         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4863                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4864                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4865
4866                 if (adev->asic_type != CHIP_VEGA12)
4867                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4868
4869                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4870                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4871                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4872
4873                 /* only for Vega10 & Raven1 */
4874                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4875
4876                 if (def != data)
4877                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4878
4879                 /* MGLS is a global flag to control all MGLS in GFX */
4880                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4881                         /* 2 - RLC memory Light sleep */
4882                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4883                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4884                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4885                                 if (def != data)
4886                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4887                         }
4888                         /* 3 - CP memory Light sleep */
4889                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4890                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4891                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4892                                 if (def != data)
4893                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4894                         }
4895                 }
4896         } else {
4897                 /* 1 - MGCG_OVERRIDE */
4898                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4899
4900                 if (adev->asic_type != CHIP_VEGA12)
4901                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4902
4903                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4904                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4905                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4906                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4907
4908                 if (def != data)
4909                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4910
4911                 /* 2 - disable MGLS in RLC */
4912                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4913                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4914                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4915                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4916                 }
4917
4918                 /* 3 - disable MGLS in CP */
4919                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4920                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4921                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4922                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4923                 }
4924         }
4925
4926         amdgpu_gfx_rlc_exit_safe_mode(adev);
4927 }
4928
4929 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4930                                            bool enable)
4931 {
4932         uint32_t data, def;
4933
4934         if (!adev->gfx.num_gfx_rings)
4935                 return;
4936
4937         amdgpu_gfx_rlc_enter_safe_mode(adev);
4938
4939         /* Enable 3D CGCG/CGLS */
4940         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4941                 /* write cmd to clear the cgcg/cgls override */
4942                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4943                 /* unset CGCG override */
4944                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4945                 /* update CGCG and CGLS override bits */
4946                 if (def != data)
4947                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4948
4949                 /* enable 3Dcgcg FSM(0x0000363f) */
4950                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4951
4952                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4953                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4954                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4955                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4956                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4957                 if (def != data)
4958                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4959
4960                 /* set IDLE_POLL_COUNT(0x00900100) */
4961                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4962                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4963                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4964                 if (def != data)
4965                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4966         } else {
4967                 /* Disable CGCG/CGLS */
4968                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4969                 /* disable cgcg, cgls should be disabled */
4970                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4971                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4972                 /* disable cgcg and cgls in FSM */
4973                 if (def != data)
4974                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4975         }
4976
4977         amdgpu_gfx_rlc_exit_safe_mode(adev);
4978 }
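
/*
 * How the magic constants above decompose, assuming the gc_9_0 field
 * layout (CGCG_EN at bit 0, CGLS_EN at bit 1, CGLS_REP_COMPANSAT_DELAY
 * at bit 2, CGCG_GFX_IDLE_THRESHOLD at bit 8 -- an assumption; verify
 * against the sh_mask header):
 *
 *   (0x36 << 8) | (0xF << 2) | CGLS_EN | CGCG_EN
 *     =  0x3600 |  0x3C      |  0x2    |  0x1     = 0x0000363F
 *
 * and for CP_RB_WPTR_POLL_CNTL (POLL_FREQUENCY at bit 0,
 * IDLE_POLL_COUNT at bit 16):
 *
 *   (0x0090 << 16) | 0x0100 = 0x00900100
 *
 * matching the values quoted in the comments above.
 */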
4979
4980 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4981                                                       bool enable)
4982 {
4983         uint32_t def, data;
4984
4985         amdgpu_gfx_rlc_enter_safe_mode(adev);
4986
4987         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4988                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4989                 /* unset CGCG override */
4990                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4991                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4992                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4993                 else
4994                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4995                 /* update CGCG and CGLS override bits */
4996                 if (def != data)
4997                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4998
4999                 /* enable cgcg FSM(0x0000363F) */
5000                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5001
5002                 if (adev->asic_type == CHIP_ARCTURUS)
5003                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5004                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5005                 else
5006                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5007                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5008                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5009                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5010                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5011                 if (def != data)
5012                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5013
5014                 /* set IDLE_POLL_COUNT(0x00900100) */
5015                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5016                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5017                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5018                 if (def != data)
5019                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5020         } else {
5021                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5022                 /* reset CGCG/CGLS bits */
5023                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5024                 /* disable cgcg and cgls in FSM */
5025                 if (def != data)
5026                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5027         }
5028
5029         amdgpu_gfx_rlc_exit_safe_mode(adev);
5030 }
5031
5032 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5033                                             bool enable)
5034 {
5035         if (enable) {
5036                 /* CGCG/CGLS should be enabled after MGCG/MGLS
5037                  * ===  MGCG + MGLS ===
5038                  */
5039                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5040                 /* ===  CGCG /CGLS for GFX 3D Only === */
5041                 gfx_v9_0_update_3d_clock_gating(adev, enable);
5042                 /* ===  CGCG + CGLS === */
5043                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5044         } else {
5045                 /* CGCG/CGLS should be disabled before MGCG/MGLS
5046                  * ===  CGCG + CGLS ===
5047                  */
5048                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5049                 /* ===  CGCG /CGLS for GFX 3D Only === */
5050                 gfx_v9_0_update_3d_clock_gating(adev, enable);
5051                 /* ===  MGCG + MGLS === */
5052                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5053         }
5054         return 0;
5055 }
5056
5057 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5058 {
5059         u32 reg, data;
5060
5061         reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5062         if (amdgpu_sriov_is_pp_one_vf(adev))
5063                 data = RREG32_NO_KIQ(reg);
5064         else
5065                 data = RREG32(reg);
5066
5067         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5068         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5069
5070         if (amdgpu_sriov_is_pp_one_vf(adev))
5071                 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5072         else
5073                 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5074 }
5075
5076 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5077                                         uint32_t offset,
5078                                         struct soc15_reg_rlcg *entries, int arr_size)
5079 {
5080         int i;
5081         uint32_t reg;
5082
5083         if (!entries)
5084                 return false;
5085
5086         for (i = 0; i < arr_size; i++) {
5087                 const struct soc15_reg_rlcg *entry;
5088
5089                 entry = &entries[i];
5090                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5091                 if (offset == reg)
5092                         return true;
5093         }
5094
5095         return false;
5096 }
5097
5098 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5099 {
5100         return gfx_v9_0_check_rlcg_range(adev, offset,
5101                                         (void *)rlcg_access_gc_9_0,
5102                                         ARRAY_SIZE(rlcg_access_gc_9_0));
5103 }
5104
5105 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5106         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5107         .set_safe_mode = gfx_v9_0_set_safe_mode,
5108         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5109         .init = gfx_v9_0_rlc_init,
5110         .get_csb_size = gfx_v9_0_get_csb_size,
5111         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5112         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5113         .resume = gfx_v9_0_rlc_resume,
5114         .stop = gfx_v9_0_rlc_stop,
5115         .reset = gfx_v9_0_rlc_reset,
5116         .start = gfx_v9_0_rlc_start,
5117         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5118         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5119         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5120 };
5121
5122 static int gfx_v9_0_set_powergating_state(void *handle,
5123                                           enum amd_powergating_state state)
5124 {
5125         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5126         bool enable = (state == AMD_PG_STATE_GATE);
5127
5128         switch (adev->asic_type) {
5129         case CHIP_RAVEN:
5130         case CHIP_RENOIR:
5131                 if (!enable)
5132                         amdgpu_gfx_off_ctrl(adev, false);
5133
5134                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5135                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5136                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5137                 } else {
5138                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5139                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5140                 }
5141
5142                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5143                         gfx_v9_0_enable_cp_power_gating(adev, true);
5144                 else
5145                         gfx_v9_0_enable_cp_power_gating(adev, false);
5146
5147                 /* update gfx cgpg state */
5148                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5149
5150                 /* update mgcg state */
5151                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5152
5153                 if (enable)
5154                         amdgpu_gfx_off_ctrl(adev, true);
5155                 break;
5156         case CHIP_VEGA12:
5157                 amdgpu_gfx_off_ctrl(adev, enable);
5158                 break;
5159         default:
5160                 break;
5161         }
5162
5163         return 0;
5164 }
5165
5166 static int gfx_v9_0_set_clockgating_state(void *handle,
5167                                           enum amd_clockgating_state state)
5168 {
5169         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5170
5171         if (amdgpu_sriov_vf(adev))
5172                 return 0;
5173
5174         switch (adev->asic_type) {
5175         case CHIP_VEGA10:
5176         case CHIP_VEGA12:
5177         case CHIP_VEGA20:
5178         case CHIP_RAVEN:
5179         case CHIP_ARCTURUS:
5180         case CHIP_RENOIR:
5181         case CHIP_ALDEBARAN:
5182                 gfx_v9_0_update_gfx_clock_gating(adev,
5183                                                  state == AMD_CG_STATE_GATE);
5184                 break;
5185         default:
5186                 break;
5187         }
5188         return 0;
5189 }
5190
5191 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5192 {
5193         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5194         int data;
5195
5196         if (amdgpu_sriov_vf(adev))
5197                 *flags = 0;
5198
5199         /* AMD_CG_SUPPORT_GFX_MGCG */
5200         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5201         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5202                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5203
5204         /* AMD_CG_SUPPORT_GFX_CGCG */
5205         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5206         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5207                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5208
5209         /* AMD_CG_SUPPORT_GFX_CGLS */
5210         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5211                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5212
5213         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5214         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5215         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5216                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5217
5218         /* AMD_CG_SUPPORT_GFX_CP_LS */
5219         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5220         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5221                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5222
5223         if (adev->asic_type != CHIP_ARCTURUS) {
5224                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5225                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5226                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5227                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5228
5229                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5230                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5231                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5232         }
5233 }
5234
5235 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5236 {
5237         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
5238 }
5239
5240 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5241 {
5242         struct amdgpu_device *adev = ring->adev;
5243         u64 wptr;
5244
5245         /* XXX check if swapping is necessary on BE */
5246         if (ring->use_doorbell) {
5247                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5248         } else {
5249                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5250                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5251         }
5252
5253         return wptr;
5254 }
5255
5256 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5257 {
5258         struct amdgpu_device *adev = ring->adev;
5259
5260         if (ring->use_doorbell) {
5261                 /* XXX check if swapping is necessary on BE */
5262                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5263                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5264         } else {
5265                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5266                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5267         }
5268 }
5269
5270 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5271 {
5272         struct amdgpu_device *adev = ring->adev;
5273         u32 ref_and_mask, reg_mem_engine;
5274         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5275
5276         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5277                 switch (ring->me) {
5278                 case 1:
5279                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5280                         break;
5281                 case 2:
5282                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5283                         break;
5284                 default:
5285                         return;
5286                 }
5287                 reg_mem_engine = 0;
5288         } else {
5289                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5290                 reg_mem_engine = 1; /* pfp */
5291         }
5292
5293         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5294                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5295                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5296                               ref_and_mask, ref_and_mask, 0x20);
5297 }
5298
5299 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5300                                         struct amdgpu_job *job,
5301                                         struct amdgpu_ib *ib,
5302                                         uint32_t flags)
5303 {
5304         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5305         u32 header, control = 0;
5306
5307         if (ib->flags & AMDGPU_IB_FLAG_CE)
5308                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5309         else
5310                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5311
5312         control |= ib->length_dw | (vmid << 24);
5313
5314         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5315                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5316
5317                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5318                         gfx_v9_0_ring_emit_de_meta(ring);
5319         }
5320
5321         amdgpu_ring_write(ring, header);
5322         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5323         amdgpu_ring_write(ring,
5324 #ifdef __BIG_ENDIAN
5325                 (2 << 0) |
5326 #endif
5327                 lower_32_bits(ib->gpu_addr));
5328         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5329         amdgpu_ring_write(ring, control);
5330 }
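
/*
 * Worked example of the control dword built above: for a 64-dword IB
 * scheduled with vmid 3 and no preemption flags,
 *
 *   control = ib->length_dw | (vmid << 24)
 *           = 64 | (3 << 24) = 0x03000040
 *
 * so the low bits carry the IB size in dwords and bits [27:24] select
 * the VMID the CP uses to translate ib->gpu_addr.
 */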
5331
5332 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5333                                           struct amdgpu_job *job,
5334                                           struct amdgpu_ib *ib,
5335                                           uint32_t flags)
5336 {
5337         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5338         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5339
5340         /* Currently there is a high probability of a wave ID mismatch
5341          * between ME and GDS, leading to a hw deadlock, because ME generates
5342          * different wave IDs than the GDS expects. This situation happens
5343          * randomly when at least 5 compute pipes use GDS ordered append.
5344          * The wave IDs generated by ME are also wrong after suspend/resume.
5345          * Those are probably bugs somewhere else in the kernel driver.
5346          *
5347          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5348          * GDS to 0 for this ring (me/pipe).
5349          */
5350         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5351                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5352                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5353                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5354         }
5355
5356         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5357         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5358         amdgpu_ring_write(ring,
5359 #ifdef __BIG_ENDIAN
5360                                 (2 << 0) |
5361 #endif
5362                                 lower_32_bits(ib->gpu_addr));
5363         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5364         amdgpu_ring_write(ring, control);
5365 }
5366
5367 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5368                                      u64 seq, unsigned flags)
5369 {
5370         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5371         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5372         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5373
5374         /* RELEASE_MEM - flush caches, send int */
5375         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5376         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5377                                                EOP_TC_NC_ACTION_EN) :
5378                                               (EOP_TCL1_ACTION_EN |
5379                                                EOP_TC_ACTION_EN |
5380                                                EOP_TC_WB_ACTION_EN |
5381                                                EOP_TC_MD_ACTION_EN)) |
5382                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5383                                  EVENT_INDEX(5)));
5384         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5385
5386         /*
5387          * the address must be qword aligned for a 64-bit write, and dword
5388          * aligned when only the low 32 bits are sent (data high discarded)
5389          */
5390         if (write64bit)
5391                 BUG_ON(addr & 0x7);
5392         else
5393                 BUG_ON(addr & 0x3);
5394         amdgpu_ring_write(ring, lower_32_bits(addr));
5395         amdgpu_ring_write(ring, upper_32_bits(addr));
5396         amdgpu_ring_write(ring, lower_32_bits(seq));
5397         amdgpu_ring_write(ring, upper_32_bits(seq));
5398         amdgpu_ring_write(ring, 0);
5399 }
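
/*
 * A decoding sketch for the RELEASE_MEM selectors above (standard PM4
 * semantics, stated here as an aid rather than taken from this file):
 * DATA_SEL(1) writes the low 32 bits of seq, DATA_SEL(2) writes the
 * full 64-bit seq, and INT_SEL(2) raises an EOP interrupt once the
 * write is confirmed -- which is why a 64-bit fence needs a qword-
 * aligned address while a 32-bit fence only needs dword alignment.
 */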
5400
5401 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5402 {
5403         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5404         uint32_t seq = ring->fence_drv.sync_seq;
5405         uint64_t addr = ring->fence_drv.gpu_addr;
5406
5407         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5408                               lower_32_bits(addr), upper_32_bits(addr),
5409                               seq, 0xffffffff, 4);
5410 }
5411
5412 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5413                                         unsigned vmid, uint64_t pd_addr)
5414 {
5415         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5416
5417         /* compute doesn't have PFP */
5418         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5419                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5420                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5421                 amdgpu_ring_write(ring, 0x0);
5422         }
5423 }
5424
5425 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5426 {
5427         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5428 }
5429
5430 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5431 {
5432         u64 wptr;
5433
5434         /* XXX check if swapping is necessary on BE */
5435         if (ring->use_doorbell)
5436                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5437         else
5438                 BUG();
5439         return wptr;
5440 }
5441
5442 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5443 {
5444         struct amdgpu_device *adev = ring->adev;
5445
5446         /* XXX check if swapping is necessary on BE */
5447         if (ring->use_doorbell) {
5448                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5449                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5450         } else {
5451                 BUG(); /* only DOORBELL method supported on gfx9 now */
5452         }
5453 }
5454
5455 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5456                                          u64 seq, unsigned int flags)
5457 {
5458         struct amdgpu_device *adev = ring->adev;
5459
5460         /* we only allocate 32 bits for each fence seq writeback address */
5461         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5462
5463         /* write fence seq to the "addr" */
5464         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5465         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5466                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5467         amdgpu_ring_write(ring, lower_32_bits(addr));
5468         amdgpu_ring_write(ring, upper_32_bits(addr));
5469         amdgpu_ring_write(ring, lower_32_bits(seq));
5470
5471         if (flags & AMDGPU_FENCE_FLAG_INT) {
5472                 /* set register to trigger INT */
5473                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5474                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5475                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5476                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5477                 amdgpu_ring_write(ring, 0);
5478                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5479         }
5480 }
5481
5482 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5483 {
5484         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5485         amdgpu_ring_write(ring, 0);
5486 }
5487
5488 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5489 {
5490         struct v9_ce_ib_state ce_payload = {0};
5491         uint64_t csa_addr;
5492         int cnt;
5493
5494         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5495         csa_addr = amdgpu_csa_vaddr(ring->adev);
5496
5497         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5498         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5499                                  WRITE_DATA_DST_SEL(8) |
5500                                  WR_CONFIRM) |
5501                                  WRITE_DATA_CACHE_POLICY(0));
5502         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5503         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5504         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5505 }
5506
5507 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5508 {
5509         struct v9_de_ib_state de_payload = {0};
5510         uint64_t csa_addr, gds_addr;
5511         int cnt;
5512
5513         csa_addr = amdgpu_csa_vaddr(ring->adev);
5514         gds_addr = csa_addr + 4096;
5515         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5516         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5517
5518         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5519         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5520         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5521                                  WRITE_DATA_DST_SEL(8) |
5522                                  WR_CONFIRM) |
5523                                  WRITE_DATA_CACHE_POLICY(0));
5524         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5525         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5526         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5527 }
5528
5529 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5530                                    bool secure)
5531 {
5532         uint32_t v = secure ? FRAME_TMZ : 0;
5533
5534         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5535         amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5536 }
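
/*
 * Example: beginning a TMZ (secure) frame emits
 *   PACKET3(PACKET3_FRAME_CONTROL, 0), FRAME_TMZ | FRAME_CMD(0)
 * while ending a non-secure frame emits FRAME_CMD(1) with no TMZ bit.
 */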
5537
5538 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5539 {
5540         uint32_t dw2 = 0;
5541
5542         if (amdgpu_sriov_vf(ring->adev))
5543                 gfx_v9_0_ring_emit_ce_meta(ring);
5544
5545         dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
5546         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5547                 /* set load_global_config & load_global_uconfig */
5548                 dw2 |= 0x8001;
5549                 /* set load_cs_sh_regs */
5550                 dw2 |= 0x01000000;
5551                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5552                 dw2 |= 0x10002;
5553
5554                 /* set load_ce_ram if a preamble is present */
5555                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5556                         dw2 |= 0x10000000;
5557         } else {
5558                 /* still load_ce_ram the first time a preamble is presented,
5559                  * even though no context switch happens.
5560                  */
5561                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5562                         dw2 |= 0x10000000;
5563         }
5564
5565         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5566         amdgpu_ring_write(ring, dw2);
5567         amdgpu_ring_write(ring, 0);
5568 }
5569
5570 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5571 {
5572         unsigned ret;
5573         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5574         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5575         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5576         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5577         ret = ring->wptr & ring->buf_mask;
5578         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5579         return ret;
5580 }
5581
5582 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5583 {
5584         unsigned cur;
5585         BUG_ON(offset > ring->buf_mask);
5586         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5587
5588         cur = (ring->wptr & ring->buf_mask) - 1;
5589         if (likely(cur > offset))
5590                 ring->ring[offset] = cur - offset;
5591         else
5592                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5593 }
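
/*
 * Worked example of the patch arithmetic above: on a 4 KiB ring
 * (1024 dwords, buf_mask == 0x3ff), if the 0x55aa55aa placeholder was
 * written at offset == 1000 and the ring has since wrapped so that
 * cur == 7, the patched skip count is
 *
 *   (ring_size >> 2) - offset + cur = 1024 - 1000 + 7 = 31
 *
 * i.e. the number of dwords between the placeholder and the last
 * written dword, measured across the wrap.
 */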
5594
5595 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5596                                     uint32_t reg_val_offs)
5597 {
5598         struct amdgpu_device *adev = ring->adev;
5599
5600         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5601         amdgpu_ring_write(ring, 0 |     /* src: register*/
5602                                 (5 << 8) |      /* dst: memory */
5603                                 (1 << 20));     /* write confirm */
5604         amdgpu_ring_write(ring, reg);
5605         amdgpu_ring_write(ring, 0);
5606         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5607                                 reg_val_offs * 4));
5608         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5609                                 reg_val_offs * 4));
5610 }
5611
5612 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5613                                     uint32_t val)
5614 {
5615         uint32_t cmd = 0;
5616
5617         switch (ring->funcs->type) {
5618         case AMDGPU_RING_TYPE_GFX:
5619                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5620                 break;
5621         case AMDGPU_RING_TYPE_KIQ:
5622                 cmd = (1 << 16); /* no inc addr */
5623                 break;
5624         default:
5625                 cmd = WR_CONFIRM;
5626                 break;
5627         }
5628         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5629         amdgpu_ring_write(ring, cmd);
5630         amdgpu_ring_write(ring, reg);
5631         amdgpu_ring_write(ring, 0);
5632         amdgpu_ring_write(ring, val);
5633 }
5634
5635 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5636                                         uint32_t val, uint32_t mask)
5637 {
5638         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5639 }
5640
5641 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5642                                                   uint32_t reg0, uint32_t reg1,
5643                                                   uint32_t ref, uint32_t mask)
5644 {
5645         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5646         struct amdgpu_device *adev = ring->adev;
5647         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5648                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5649
5650         if (fw_version_ok)
5651                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5652                                       ref, mask, 0x20);
5653         else
5654                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5655                                                            ref, mask);
5656 }
5657
5658 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5659 {
5660         struct amdgpu_device *adev = ring->adev;
5661         uint32_t value = 0;
5662
5663         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5664         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5665         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5666         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5667         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5668 }
5669
5670 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5671                                                  enum amdgpu_interrupt_state state)
5672 {
5673         switch (state) {
5674         case AMDGPU_IRQ_STATE_DISABLE:
5675         case AMDGPU_IRQ_STATE_ENABLE:
5676                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5677                                TIME_STAMP_INT_ENABLE,
5678                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5679                 break;
5680         default:
5681                 break;
5682         }
5683 }
5684
5685 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5686                                                      int me, int pipe,
5687                                                      enum amdgpu_interrupt_state state)
5688 {
5689         u32 mec_int_cntl, mec_int_cntl_reg;
5690
5691         /*
5692          * amdgpu controls only the first MEC. That's why this function only
5693          * handles the setting of interrupts for this specific MEC. All other
5694          * pipes' interrupts are set by amdkfd.
5695          */
5696
5697         if (me == 1) {
5698                 switch (pipe) {
5699                 case 0:
5700                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5701                         break;
5702                 case 1:
5703                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5704                         break;
5705                 case 2:
5706                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5707                         break;
5708                 case 3:
5709                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5710                         break;
5711                 default:
5712                         DRM_DEBUG("invalid pipe %d\n", pipe);
5713                         return;
5714                 }
5715         } else {
5716                 DRM_DEBUG("invalid me %d\n", me);
5717                 return;
5718         }
5719
5720         switch (state) {
5721         case AMDGPU_IRQ_STATE_DISABLE:
5722                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5723                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5724                                              TIME_STAMP_INT_ENABLE, 0);
5725                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5726                 break;
5727         case AMDGPU_IRQ_STATE_ENABLE:
5728                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5729                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5730                                              TIME_STAMP_INT_ENABLE, 1);
5731                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5732                 break;
5733         default:
5734                 break;
5735         }
5736 }
5737
5738 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5739                                              struct amdgpu_irq_src *source,
5740                                              unsigned type,
5741                                              enum amdgpu_interrupt_state state)
5742 {
5743         switch (state) {
5744         case AMDGPU_IRQ_STATE_DISABLE:
5745         case AMDGPU_IRQ_STATE_ENABLE:
5746                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5747                                PRIV_REG_INT_ENABLE,
5748                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5749                 break;
5750         default:
5751                 break;
5752         }
5753
5754         return 0;
5755 }
5756
5757 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5758                                               struct amdgpu_irq_src *source,
5759                                               unsigned type,
5760                                               enum amdgpu_interrupt_state state)
5761 {
5762         switch (state) {
5763         case AMDGPU_IRQ_STATE_DISABLE:
5764         case AMDGPU_IRQ_STATE_ENABLE:
5765                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5766                                PRIV_INSTR_INT_ENABLE,
5767                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5768                 break;
5769         default:
5770                 break;
5771         }
5772
5773         return 0;
5774 }
5775
5776 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5777         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5778                         CP_ECC_ERROR_INT_ENABLE, 1)
5779
5780 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5781         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5782                         CP_ECC_ERROR_INT_ENABLE, 0)
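
/*
 * The two helpers above rely on token pasting: for instance,
 * ENABLE_ECC_ON_ME_PIPE(1, 0) expands to
 *
 *   WREG32_FIELD15(GC, 0, CP_ME1_PIPE0_INT_CNTL,
 *                  CP_ECC_ERROR_INT_ENABLE, 1)
 *
 * so one macro covers every ME/pipe interrupt control register.
 */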
5783
5784 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5785                                               struct amdgpu_irq_src *source,
5786                                               unsigned type,
5787                                               enum amdgpu_interrupt_state state)
5788 {
5789         switch (state) {
5790         case AMDGPU_IRQ_STATE_DISABLE:
5791                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5792                                 CP_ECC_ERROR_INT_ENABLE, 0);
5793                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5794                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5795                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5796                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5797                 break;
5798
5799         case AMDGPU_IRQ_STATE_ENABLE:
5800                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5801                                 CP_ECC_ERROR_INT_ENABLE, 1);
5802                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5803                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5804                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5805                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5806                 break;
5807         default:
5808                 break;
5809         }
5810
5811         return 0;
5812 }
5813
5814
5815 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5816                                             struct amdgpu_irq_src *src,
5817                                             unsigned type,
5818                                             enum amdgpu_interrupt_state state)
5819 {
5820         switch (type) {
5821         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5822                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5823                 break;
5824         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5825                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5826                 break;
5827         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5828                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5829                 break;
5830         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5831                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5832                 break;
5833         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5834                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5835                 break;
5836         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5837                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5838                 break;
5839         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5840                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5841                 break;
5842         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5843                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5844                 break;
5845         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5846                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5847                 break;
5848         default:
5849                 break;
5850         }
5851         return 0;
5852 }
5853
5854 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5855                             struct amdgpu_irq_src *source,
5856                             struct amdgpu_iv_entry *entry)
5857 {
5858         int i;
5859         u8 me_id, pipe_id, queue_id;
5860         struct amdgpu_ring *ring;
5861
5862         DRM_DEBUG("IH: CP EOP\n");
5863         me_id = (entry->ring_id & 0x0c) >> 2;
5864         pipe_id = (entry->ring_id & 0x03) >> 0;
5865         queue_id = (entry->ring_id & 0x70) >> 4;
5866
5867         switch (me_id) {
5868         case 0:
5869                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5870                 break;
5871         case 1:
5872         case 2:
5873                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5874                         ring = &adev->gfx.compute_ring[i];
5875                         /* Per-queue interrupts are supported for MEC starting from VI.
5876                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
5877                          */
5878                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5879                                 amdgpu_fence_process(ring);
5880                 }
5881                 break;
5882         }
5883         return 0;
5884 }
5885
5886 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5887                            struct amdgpu_iv_entry *entry)
5888 {
5889         u8 me_id, pipe_id, queue_id;
5890         struct amdgpu_ring *ring;
5891         int i;
5892
5893         me_id = (entry->ring_id & 0x0c) >> 2;
5894         pipe_id = (entry->ring_id & 0x03) >> 0;
5895         queue_id = (entry->ring_id & 0x70) >> 4;
5896
5897         switch (me_id) {
5898         case 0:
5899                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5900                 break;
5901         case 1:
5902         case 2:
5903                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5904                         ring = &adev->gfx.compute_ring[i];
5905                         if (ring->me == me_id && ring->pipe == pipe_id &&
5906                             ring->queue == queue_id)
5907                                 drm_sched_fault(&ring->sched);
5908                 }
5909                 break;
5910         }
5911 }
5912
5913 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5914                                  struct amdgpu_irq_src *source,
5915                                  struct amdgpu_iv_entry *entry)
5916 {
5917         DRM_ERROR("Illegal register access in command stream\n");
5918         gfx_v9_0_fault(adev, entry);
5919         return 0;
5920 }
5921
5922 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5923                                   struct amdgpu_irq_src *source,
5924                                   struct amdgpu_iv_entry *entry)
5925 {
5926         DRM_ERROR("Illegal instruction in command stream\n");
5927         gfx_v9_0_fault(adev, entry);
5928         return 0;
5929 }
5930
5931
5932 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5933         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5934           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5935           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5936         },
5937         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5938           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5939           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5940         },
5941         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5942           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5943           0, 0
5944         },
5945         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5946           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5947           0, 0
5948         },
5949         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5950           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5951           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5952         },
5953         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5954           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5955           0, 0
5956         },
5957         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5958           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5959           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5960         },
5961         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5962           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5963           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5964         },
5965         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5966           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5967           0, 0
5968         },
5969         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5970           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5971           0, 0
5972         },
5973         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5974           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5975           0, 0
5976         },
5977         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5978           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5979           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5980         },
5981         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5982           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5983           0, 0
5984         },
5985         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5986           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5987           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5988         },
5989         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5990           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5991           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5992           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5993         },
5994         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5995           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5996           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5997           0, 0
5998         },
5999         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6000           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6001           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6002           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6003         },
6004         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6005           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6006           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6007           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6008         },
6009         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6010           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6011           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6012           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6013         },
6014         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6015           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6016           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6017           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6018         },
6019         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6020           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6021           0, 0
6022         },
6023         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6024           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6025           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6026         },
6027         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6028           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6029           0, 0
6030         },
6031         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6032           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6033           0, 0
6034         },
6035         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6036           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6037           0, 0
6038         },
6039         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6040           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6041           0, 0
6042         },
6043         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6044           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6045           0, 0
6046         },
6047         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6048           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6049           0, 0
6050         },
6051         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6052           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6053           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6054         },
6055         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6056           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6057           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6058         },
6059         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6060           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6061           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6062         },
6063         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6064           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6065           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6066         },
6067         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6068           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6069           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6070         },
6071         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6072           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6073           0, 0
6074         },
6075         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6076           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6077           0, 0
6078         },
6079         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6080           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6081           0, 0
6082         },
6083         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6084           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6085           0, 0
6086         },
6087         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6088           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6089           0, 0
6090         },
6091         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6092           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6093           0, 0
6094         },
6095         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6096           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6097           0, 0
6098         },
6099         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6100           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6101           0, 0
6102         },
6103         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6104           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6105           0, 0
6106         },
6107         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6108           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6109           0, 0
6110         },
6111         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6112           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6113           0, 0
6114         },
6115         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6116           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6117           0, 0
6118         },
6119         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6120           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6121           0, 0
6122         },
6123         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6124           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6125           0, 0
6126         },
6127         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6128           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6129           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6130         },
6131         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6132           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6133           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6134         },
6135         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6136           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6137           0, 0
6138         },
6139         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6140           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6141           0, 0
6142         },
6143         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6144           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6145           0, 0
6146         },
6147         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6148           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6149           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6150         },
6151         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6152           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6153           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6154         },
6155         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6156           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6157           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6158         },
6159         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6160           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6161           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6162         },
6163         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6164           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6165           0, 0
6166         },
6167         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6168           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6169           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6170         },
6171         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6172           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6173           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6174         },
6175         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6176           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6177           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6178         },
6179         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6180           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6181           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6182         },
6183         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6184           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6185           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6186         },
6187         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6188           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6189           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6190         },
6191         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6192           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6193           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6194         },
6195         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6196           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6197           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6198         },
6199         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6200           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6201           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6202         },
6203         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6204           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6205           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6206         },
6207         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6208           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6209           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6210         },
6211         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6212           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6213           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6214         },
6215         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6216           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6217           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6218         },
6219         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6220           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6221           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6222         },
6223         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6224           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6225           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6226         },
6227         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6228           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6229           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6230         },
6231         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6232           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6233           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6234         },
6235         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6236           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6237           0, 0
6238         },
6239         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6240           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6241           0, 0
6242         },
6243         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6244           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6245           0, 0
6246         },
6247         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6248           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6249           0, 0
6250         },
6251         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6252           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6253           0, 0
6254         },
6255         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6256           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6257           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6258         },
6259         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6260           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6261           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6262         },
6263         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6264           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6265           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6266         },
6267         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6268           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6269           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6270         },
6271         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6272           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6273           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6274         },
6275         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6276           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6277           0, 0
6278         },
6279         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6280           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6281           0, 0
6282         },
6283         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6284           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6285           0, 0
6286         },
6287         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6288           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6289           0, 0
6290         },
6291         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6292           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6293           0, 0
6294         },
6295         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6296           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6297           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6298         },
6299         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6300           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6301           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6302         },
6303         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6304           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6305           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6306         },
6307         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6308           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6309           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6310         },
6311         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6312           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6313           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6314         },
6315         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6316           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6317           0, 0
6318         },
6319         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6320           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6321           0, 0
6322         },
6323         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6324           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6325           0, 0
6326         },
6327         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6328           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6329           0, 0
6330         },
6331         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6332           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6333           0, 0
6334         },
6335         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6336           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6337           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6338         },
6339         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6340           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6341           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6342         },
6343         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6344           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6345           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6346         },
6347         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6348           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6349           0, 0
6350         },
6351         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6352           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6353           0, 0
6354         },
6355         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6356           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6357           0, 0
6358         },
6359         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6360           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6361           0, 0
6362         },
6363         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6364           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6365           0, 0
6366         },
6367         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6368           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6369           0, 0
6370         }
6371 };
6372
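/* Inject a RAS error into a GFX sub-block via the PSP RAS TA.  The request
 * is validated against the ras_gfx_subblocks table (defined earlier in this
 * file) before being handed to psp_ras_trigger_error().
 */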
6373 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6374                                      void *inject_if)
6375 {
6376         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6377         int ret;
6378         struct ta_ras_trigger_error_input block_info = { 0 };
6379
6380         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6381                 return -EINVAL;
6382
6383         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6384                 return -EINVAL;
6385
6386         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6387                 return -EPERM;
6388
6389         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6390               info->head.type)) {
6391                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6392                         ras_gfx_subblocks[info->head.sub_block_index].name,
6393                         info->head.type);
6394                 return -EPERM;
6395         }
6396
6397         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6398               info->head.type)) {
6399                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6400                         ras_gfx_subblocks[info->head.sub_block_index].name,
6401                         info->head.type);
6402                 return -EPERM;
6403         }
6404
6405         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6406         block_info.sub_block_index =
6407                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6408         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6409         block_info.address = info->address;
6410         block_info.value = info->value;
6411
6412         mutex_lock(&adev->grbm_idx_mutex);
6413         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6414         mutex_unlock(&adev->grbm_idx_mutex);
6415
6416         return ret;
6417 }
6418
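/* Names of the VML2, VML2 walker and ATC L2 memory instances below, indexed
 * by the value written to the corresponding *_ECC_INDEX/*_EDC_INDEX register.
 */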
6419 static const char *vml2_mems[] = {
6420         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6421         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6422         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6423         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6424         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6425         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6426         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6427         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6428         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6429         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6430         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6431         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6432         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6433         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6434         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6435         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6436 };
6437
6438 static const char *vml2_walker_mems[] = {
6439         "UTC_VML2_CACHE_PDE0_MEM0",
6440         "UTC_VML2_CACHE_PDE0_MEM1",
6441         "UTC_VML2_CACHE_PDE1_MEM0",
6442         "UTC_VML2_CACHE_PDE1_MEM1",
6443         "UTC_VML2_CACHE_PDE2_MEM0",
6444         "UTC_VML2_CACHE_PDE2_MEM1",
6445         "UTC_VML2_RDIF_LOG_FIFO",
6446 };
6447
6448 static const char *atc_l2_cache_2m_mems[] = {
6449         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6450         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6451         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6452         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6453 };
6454
6455 static const char *atc_l2_cache_4k_mems[] = {
6456         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6457         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6458         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6459         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6460         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6461         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6462         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6463         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6464         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6465         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6466         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6467         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6468         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6469         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6470         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6471         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6472         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6473         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6474         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6475         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6476         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6477         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6478         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6479         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6480         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6481         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6482         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6483         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6484         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6485         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6486         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6487         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6488 };
6489
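/* Scan the UTC (VML2/ATC L2) EDC counters: select each memory instance via
 * its INDEX register, read the SEC/DED counts and accumulate them into
 * err_data as correctable/uncorrectable errors.  In the ATC L2 count
 * registers, bits [14:13] hold the SEC count and bits [16:15] the DED count.
 */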
6490 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6491                                          struct ras_err_data *err_data)
6492 {
6493         uint32_t i, data;
6494         uint32_t sec_count, ded_count;
6495
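        /* park each INDEX selector out of range and clear the counters */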
6496         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6497         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6498         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6499         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6500         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6501         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6502         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6503         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6504
6505         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6506                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6507                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6508
6509                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6510                 if (sec_count) {
6511                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6512                                 "SEC %d\n", i, vml2_mems[i], sec_count);
6513                         err_data->ce_count += sec_count;
6514                 }
6515
6516                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6517                 if (ded_count) {
6518                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6519                                 "DED %d\n", i, vml2_mems[i], ded_count);
6520                         err_data->ue_count += ded_count;
6521                 }
6522         }
6523
6524         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6525                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6526                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6527
6528                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6529                                                 SEC_COUNT);
6530                 if (sec_count) {
6531                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6532                                 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6533                         err_data->ce_count += sec_count;
6534                 }
6535
6536                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6537                                                 DED_COUNT);
6538                 if (ded_count) {
6539                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6540                                 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6541                         err_data->ue_count += ded_count;
6542                 }
6543         }
6544
6545         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6546                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6547                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6548
6549                 sec_count = (data & 0x00006000L) >> 0xd;
6550                 if (sec_count) {
6551                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6552                                 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6553                                 sec_count);
6554                         err_data->ce_count += sec_count;
6555                 }
6556         }
6557
6558         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6559                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6560                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6561
6562                 sec_count = (data & 0x00006000L) >> 0xd;
6563                 if (sec_count) {
6564                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6565                                 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6566                                 sec_count);
6567                         err_data->ce_count += sec_count;
6568                 }
6569
6570                 ded_count = (data & 0x00018000L) >> 0xf;
6571                 if (ded_count) {
6572                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6573                                 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6574                                 ded_count);
6575                         err_data->ue_count += ded_count;
6576                 }
6577         }
6578
6579         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6580         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6581         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6582         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6583
6584         return 0;
6585 }
6586
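/* Fold the SEC/DED counts of one EDC counter register value into the running
 * totals, using the gfx_v9_0_ras_fields table to find the per-field masks
 * and shifts.
 */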
6587 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6588         const struct soc15_reg_entry *reg,
6589         uint32_t se_id, uint32_t inst_id, uint32_t value,
6590         uint32_t *sec_count, uint32_t *ded_count)
6591 {
6592         uint32_t i;
6593         uint32_t sec_cnt, ded_cnt;
6594
6595         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6596                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6597                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6598                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6599                         continue;
6600
6601                 sec_cnt = (value &
6602                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6603                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6604                 if (sec_cnt) {
6605                         dev_info(adev->dev, "GFX SubBlock %s, "
6606                                 "Instance[%d][%d], SEC %d\n",
6607                                 gfx_v9_0_ras_fields[i].name,
6608                                 se_id, inst_id,
6609                                 sec_cnt);
6610                         *sec_count += sec_cnt;
6611                 }
6612
6613                 ded_cnt = (value &
6614                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6615                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6616                 if (ded_cnt) {
6617                         dev_info(adev->dev, "GFX SubBlock %s, "
6618                                 "Instance[%d][%d], DED %d\n",
6619                                 gfx_v9_0_ras_fields[i].name,
6620                                 se_id, inst_id,
6621                                 ded_cnt);
6622                         *ded_count += ded_cnt;
6623                 }
6624         }
6625
6626         return 0;
6627 }
6628
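/* The EDC counters are cleared when read, so reading every counter once
 * (including the UTC ones below) resets the accumulated RAS error state.
 */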
6629 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6630 {
6631         int i, j, k;
6632
6633         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6634                 return;
6635
6636         /* read back registers to clear the counters */
6637         mutex_lock(&adev->grbm_idx_mutex);
6638         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6639                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6640                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6641                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6642                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6643                         }
6644                 }
6645         }
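        /* restore GRBM_GFX_INDEX to broadcast mode (all SEs/SHs/instances) */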
6646         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6647         mutex_unlock(&adev->grbm_idx_mutex);
6648
6649         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6650         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6651         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6652         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6653         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6654         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6655         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6656         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6657
6658         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6659                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6660                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6661         }
6662
6663         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6664                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6665                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6666         }
6667
6668         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6669                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6670                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6671         }
6672
6673         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6674                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6675                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6676         }
6677
6678         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6679         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6680         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6681         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6682 }
6683
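/* Walk every EDC counter register across all SE/SH/instance combinations,
 * counting SEC errors as correctable and DED errors as uncorrectable, then
 * add the UTC EDC status on top.
 */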
6684 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6685                                           void *ras_error_status)
6686 {
6687         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6688         uint32_t sec_count = 0, ded_count = 0;
6689         uint32_t i, j, k;
6690         uint32_t reg_value;
6691
6692         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6693                 return -EINVAL;
6694
6695         err_data->ue_count = 0;
6696         err_data->ce_count = 0;
6697
6698         mutex_lock(&adev->grbm_idx_mutex);
6699
6700         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6701                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6702                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6703                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6704                                 reg_value =
6705                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6706                                 if (reg_value)
6707                                         gfx_v9_0_ras_error_count(adev,
6708                                                 &gfx_v9_0_edc_counter_regs[i],
6709                                                 j, k, reg_value,
6710                                                 &sec_count, &ded_count);
6711                         }
6712                 }
6713         }
6714
6715         err_data->ce_count += sec_count;
6716         err_data->ue_count += ded_count;
6717
6718         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6719         mutex_unlock(&adev->grbm_idx_mutex);
6720
6721         gfx_v9_0_query_utc_edc_status(adev, err_data);
6722
6723         return 0;
6724 }
6725
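/* Emit an ACQUIRE_MEM packet over the full address range with the
 * SH_ICACHE/SH_KCACHE/TC/TCL1 action and TC writeback bits set, making
 * the caches coherent before subsequent operations.
 */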
6726 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6727 {
6728         const unsigned int cp_coher_cntl =
6729                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6730                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6731                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6732                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6733                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6734
6735         /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
6736         amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6737         amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6738         amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6739         amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6740         amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6741         amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6742         amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6743 }
6744
6745 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6746                                         uint32_t pipe, bool enable)
6747 {
6748         struct amdgpu_device *adev = ring->adev;
6749         uint32_t val;
6750         uint32_t wcl_cs_reg;
6751
6752         /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
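        /* when enabled, throttle the pipe to the minimum multiplier (0x1) */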
6753         val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
6754
6755         switch (pipe) {
6756         case 0:
6757                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
6758                 break;
6759         case 1:
6760                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
6761                 break;
6762         case 2:
6763                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
6764                 break;
6765         case 3:
6766                 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
6767                 break;
6768         default:
6769                 DRM_DEBUG("invalid pipe %d\n", pipe);
6770                 return;
6771         }
6772
6773         amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6774 }
6775
6776 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6777 {
6778         struct amdgpu_device *adev = ring->adev;
6779         uint32_t val;
6780         int i;
6781
6782         /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used
6783          * to limit the number of gfx waves. Setting the low 5 bits
6784          * (0x1f) makes sure gfx only gets around 25% of the GPU's
6785          * resources.
6786          */
6787         val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6788         amdgpu_ring_emit_wreg(ring,
6789                               SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
6790                               val);
6791
6792         /* Restrict waves for normal/low priority compute queues as well
6793          * to get the best QoS for high priority compute jobs.
6794          *
6795          * amdgpu only controls the 1st ME (CS pipes 0-3).
6796          */
6797         for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6798                 if (i != ring->pipe)
6799                         gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
6801         }
6802 }
6803
6804 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6805         .name = "gfx_v9_0",
6806         .early_init = gfx_v9_0_early_init,
6807         .late_init = gfx_v9_0_late_init,
6808         .sw_init = gfx_v9_0_sw_init,
6809         .sw_fini = gfx_v9_0_sw_fini,
6810         .hw_init = gfx_v9_0_hw_init,
6811         .hw_fini = gfx_v9_0_hw_fini,
6812         .suspend = gfx_v9_0_suspend,
6813         .resume = gfx_v9_0_resume,
6814         .is_idle = gfx_v9_0_is_idle,
6815         .wait_for_idle = gfx_v9_0_wait_for_idle,
6816         .soft_reset = gfx_v9_0_soft_reset,
6817         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6818         .set_powergating_state = gfx_v9_0_set_powergating_state,
6819         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6820 };
6821
6822 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6823         .type = AMDGPU_RING_TYPE_GFX,
6824         .align_mask = 0xff,
6825         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6826         .support_64bit_ptrs = true,
6827         .vmhub = AMDGPU_GFXHUB_0,
6828         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6829         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6830         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6831         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
6832                 5 +  /* COND_EXEC */
6833                 7 +  /* PIPELINE_SYNC */
6834                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6835                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6836                 2 + /* VM_FLUSH */
6837                 8 +  /* FENCE for VM_FLUSH */
6838                 20 + /* GDS switch */
6839                 4 + /* double SWITCH_BUFFER,
6840                        the first COND_EXEC jumps to the place just
6841                        prior to this double SWITCH_BUFFER */
6842                 5 + /* COND_EXEC */
6843                 7 + /* HDP_flush */
6844                 4 + /* VGT_flush */
6845                 14 + /* CE_META */
6846                 31 + /* DE_META */
6847                 3 + /* CNTX_CTRL */
6848                 5 + /* HDP_INVL */
6849                 8 + 8 + /* FENCE x2 */
6850                 2 + /* SWITCH_BUFFER */
6851                 7, /* gfx_v9_0_emit_mem_sync */
6852         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6853         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6854         .emit_fence = gfx_v9_0_ring_emit_fence,
6855         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6856         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6857         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6858         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6859         .test_ring = gfx_v9_0_ring_test_ring,
6860         .test_ib = gfx_v9_0_ring_test_ib,
6861         .insert_nop = amdgpu_ring_insert_nop,
6862         .pad_ib = amdgpu_ring_generic_pad_ib,
6863         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6864         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6865         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6866         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6867         .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6868         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6869         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6870         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6871         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6872         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6873 };
6874
6875 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6876         .type = AMDGPU_RING_TYPE_COMPUTE,
6877         .align_mask = 0xff,
6878         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6879         .support_64bit_ptrs = true,
6880         .vmhub = AMDGPU_GFXHUB_0,
6881         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6882         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6883         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6884         .emit_frame_size =
6885                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6886                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6887                 5 + /* hdp invalidate */
6888                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6889                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6890                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6891                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6892                 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6893                 7 + /* gfx_v9_0_emit_mem_sync */
6894                 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
6895                 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
6896         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6897         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6898         .emit_fence = gfx_v9_0_ring_emit_fence,
6899         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6900         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6901         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6902         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6903         .test_ring = gfx_v9_0_ring_test_ring,
6904         .test_ib = gfx_v9_0_ring_test_ib,
6905         .insert_nop = amdgpu_ring_insert_nop,
6906         .pad_ib = amdgpu_ring_generic_pad_ib,
6907         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6908         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6909         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6910         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6911         .emit_wave_limit = gfx_v9_0_emit_wave_limit,
6912 };
6913
6914 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6915         .type = AMDGPU_RING_TYPE_KIQ,
6916         .align_mask = 0xff,
6917         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6918         .support_64bit_ptrs = true,
6919         .vmhub = AMDGPU_GFXHUB_0,
6920         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6921         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6922         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6923         .emit_frame_size =
6924                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6925                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6926                 5 + /* hdp invalidate */
6927                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6928                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6929                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6930                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6931                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6932         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6933         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6934         .test_ring = gfx_v9_0_ring_test_ring,
6935         .insert_nop = amdgpu_ring_insert_nop,
6936         .pad_ib = amdgpu_ring_generic_pad_ib,
6937         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6938         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6939         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6940         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6941 };
6942
6943 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6944 {
6945         int i;
6946
6947         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6948
6949         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6950                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6951
6952         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6953                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6954 }
6955
6956 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6957         .set = gfx_v9_0_set_eop_interrupt_state,
6958         .process = gfx_v9_0_eop_irq,
6959 };
6960
6961 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6962         .set = gfx_v9_0_set_priv_reg_fault_state,
6963         .process = gfx_v9_0_priv_reg_irq,
6964 };
6965
6966 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6967         .set = gfx_v9_0_set_priv_inst_fault_state,
6968         .process = gfx_v9_0_priv_inst_irq,
6969 };
6970
6971 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6972         .set = gfx_v9_0_set_cp_ecc_error_state,
6973         .process = amdgpu_gfx_cp_ecc_error_irq,
6974 };
6975
6977 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6978 {
6979         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6980         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6981
6982         adev->gfx.priv_reg_irq.num_types = 1;
6983         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6984
6985         adev->gfx.priv_inst_irq.num_types = 1;
6986         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6987
6988         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6989         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6990 }
6991
6992 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6993 {
6994         switch (adev->asic_type) {
6995         case CHIP_VEGA10:
6996         case CHIP_VEGA12:
6997         case CHIP_VEGA20:
6998         case CHIP_RAVEN:
6999         case CHIP_ARCTURUS:
7000         case CHIP_RENOIR:
7001         case CHIP_ALDEBARAN:
7002                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7003                 break;
7004         default:
7005                 break;
7006         }
7007 }
7008
7009 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7010 {
7011         /* init asic gds info */
7012         switch (adev->asic_type) {
7013         case CHIP_VEGA10:
7014         case CHIP_VEGA12:
7015         case CHIP_VEGA20:
7016                 adev->gds.gds_size = 0x10000;
7017                 break;
7018         case CHIP_RAVEN:
7019         case CHIP_ARCTURUS:
7020                 adev->gds.gds_size = 0x1000;
7021                 break;
7022         case CHIP_ALDEBARAN:
7023                 /* Aldebaran removed all of the GDS internal memory;
7024                  * the kernel only supports GWS opcodes such as barrier
7025                  * and semaphore. */
7026                 adev->gds.gds_size = 0;
7027                 break;
7028         default:
7029                 adev->gds.gds_size = 0x10000;
7030                 break;
7031         }
7032
7033         switch (adev->asic_type) {
7034         case CHIP_VEGA10:
7035         case CHIP_VEGA20:
7036                 adev->gds.gds_compute_max_wave_id = 0x7ff;
7037                 break;
7038         case CHIP_VEGA12:
7039                 adev->gds.gds_compute_max_wave_id = 0x27f;
7040                 break;
7041         case CHIP_RAVEN:
7042                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7043                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7044                 else
7045                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7046                 break;
7047         case CHIP_ARCTURUS:
7048                 adev->gds.gds_compute_max_wave_id = 0xfff;
7049                 break;
7050         case CHIP_ALDEBARAN:
7051                 /* deprecated for Aldebaran, no usage at all */
7052                 adev->gds.gds_compute_max_wave_id = 0;
7053                 break;
7054         default:
7055                 /* this really depends on the chip */
7056                 adev->gds.gds_compute_max_wave_id = 0x7ff;
7057                 break;
7058         }
7059
7060         adev->gds.gws_size = 64;
7061         adev->gds.oa_size = 16;
7062 }
7063
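/* Mirror the user-requested inactive CU mask into GC_USER_SHADER_ARRAY_CONFIG
 * for the SE/SH currently selected through GRBM_GFX_INDEX.
 */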
7064 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7065                                                  u32 bitmap)
7066 {
7067         u32 data;
7068
7069         if (!bitmap)
7070                 return;
7071
7072         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7073         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7074
7075         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7076 }
7077
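/* A CU is active when it is neither fuse-disabled (CC_GC_SHADER_ARRAY_CONFIG)
 * nor user-disabled (GC_USER_SHADER_ARRAY_CONFIG); returns the active-CU mask
 * for the currently selected SE/SH.
 */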
7078 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7079 {
7080         u32 data, mask;
7081
7082         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7083         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7084
7085         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7086         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7087
7088         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7089
7090         return (~data) & mask;
7091 }
7092
7093 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7094                                  struct amdgpu_cu_info *cu_info)
7095 {
7096         int i, j, k, counter, active_cu_number = 0;
7097         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7098         unsigned disable_masks[4 * 4];
7099
7100         if (!adev || !cu_info)
7101                 return -EINVAL;
7102
7103         /*
7104          * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
7105          */
7106         if (adev->gfx.config.max_shader_engines *
7107                 adev->gfx.config.max_sh_per_se > 16)
7108                 return -EINVAL;
7109
7110         amdgpu_gfx_parse_disable_cu(disable_masks,
7111                                     adev->gfx.config.max_shader_engines,
7112                                     adev->gfx.config.max_sh_per_se);
7113
7114         mutex_lock(&adev->grbm_idx_mutex);
7115         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7116                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7117                         mask = 1;
7118                         ao_bitmap = 0;
7119                         counter = 0;
7120                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
7121                         gfx_v9_0_set_user_cu_inactive_bitmap(
7122                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7123                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7124
7125                         /*
7126                          * The bitmap (and ao_cu_bitmap) in the cu_info structure
7127                          * is a 4x4 array, which suits Vega ASICs with their
7128                          * 4*2 SE/SH layout.
7129                          * For Arcturus, the SE/SH layout changed to 8*1.
7130                          * To minimize the impact, we map it onto the current
7131                          * bitmap array as below:
7132                          *    SE4,SH0 --> bitmap[0][1]
7133                          *    SE5,SH0 --> bitmap[1][1]
7134                          *    SE6,SH0 --> bitmap[2][1]
7135                          *    SE7,SH0 --> bitmap[3][1]
7136                          */
7137                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
7138
7139                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7140                                 if (bitmap & mask) {
7141                                         if (counter < adev->gfx.config.max_cu_per_sh)
7142                                                 ao_bitmap |= mask;
7143                                         counter++;
7144                                 }
7145                                 mask <<= 1;
7146                         }
7147                         active_cu_number += counter;
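                        /* ao_cu_mask is a 32-bit mask with 8 bits per SH,
                         * so it only covers the first 2 SEs x 2 SHs
                         */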
7148                         if (i < 2 && j < 2)
7149                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7150                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7151                 }
7152         }
7153         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7154         mutex_unlock(&adev->grbm_idx_mutex);
7155
7156         cu_info->number = active_cu_number;
7157         cu_info->ao_cu_mask = ao_cu_mask;
7158         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7159
7160         return 0;
7161 }
7162
7163 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7164 {
7165         .type = AMD_IP_BLOCK_TYPE_GFX,
7166         .major = 9,
7167         .minor = 0,
7168         .rev = 0,
7169         .funcs = &gfx_v9_0_ip_funcs,
7170 };