drm/amdgpu: implement more ib pools (v2)
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                                                         0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_1_ARCT                                                         0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_2_ARCT                                                         0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_3_ARCT                                                         0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_4_ARCT                                                         0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_5_ARCT                                                         0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                        0

enum ta_ras_gfx_subblock {
	/*CPC*/
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF*/
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG*/
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS*/
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI*/
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges)*/
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0*/
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1*/
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2*/
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA*/
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA*/
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges)*/
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0*/
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1*/
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2*/
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3*/
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4*/
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI*/
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP*/
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD*/
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges)*/
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0*/
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1*/
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2*/
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank*/
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker*/
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

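/*
 * Pack the per-subblock RAS capability flags into a table entry.  The
 * first four flags (a-d) form the hardware-supported error type bitfield;
 * the last four (e-h) form the software-supported one.  Note the irregular
 * bit order in the software field: g lands in bit 0, e in bit 1, h in
 * bit 2 and f in bit 3.
 */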
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

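/*
 * Write a GC register through the RLCG interface.  GRBM_GFX_CNTL and
 * GRBM_GFX_INDEX are mirrored into SCRATCH_REG2/3 and then written
 * directly; every other offset is handed to the RLC by placing the value
 * in SCRATCH_REG0 and the offset (with bit 31 set as the request flag) in
 * SCRATCH_REG1, ringing RLC_SPARE_INT and polling until the RLC clears
 * bit 31, for up to 50000 * 10us.
 */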
void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;
	static uint32_t grbm_cntl;
	static uint32_t grbm_idx;

	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

	if (amdgpu_sriov_runtime(adev)) {
		pr_err("shouldn't call rlcg register write during runtime\n");
		return;
	}

	if (offset == grbm_cntl || offset == grbm_idx) {
		if (offset == grbm_cntl)
			writel(v, scratch_reg2);
		else if (offset == grbm_idx)
			writel(v, scratch_reg3);

		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		uint32_t i = 0;
		uint32_t retries = 50000;

		writel(v, scratch_reg0);
		writel(offset | 0x80000000, scratch_reg1);
		writel(1, spare_int);
		for (i = 0; i < retries; i++) {
			u32 tmp;

			tmp = readl(scratch_reg1);
			if (!(tmp & 0x80000000))
				break;

			udelay(10);
		}
		if (i >= retries)
			pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
	}
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

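/* Tell the KIQ which compute queues it owns: SET_RESOURCES carries the
 * 64-bit queue mask plus gws/oac masks and GDS heap info (all zero here).
 */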
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask:0 queue_type:0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

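/*
 * Map a ring's hardware queue through the KIQ: the MAP_QUEUES packet
 * carries the ring's me/pipe/queue selection, doorbell offset, MQD
 * address and wptr writeback address.
 */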
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /*queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

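/*
 * Unmap (or preempt) a queue through the KIQ.  For
 * PREEMPT_QUEUES_NO_UNMAP the trailing dwords carry the fence address
 * and sequence number to signal; otherwise they are zero.
 */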
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

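/* Ask the KIQ to report a queue's status; the result is written back
 * through the supplied fence address and sequence number.
 */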
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

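/* Flush TLB entries for the given pasid via the KIQ, on one or all VM hubs. */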
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

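/*
 * Program the per-ASIC "golden" register settings: the generation-wide
 * table first, then the chip-specific one.  Renoir returns early because
 * it does not take the gc_9_x common settings, and neither does Arcturus.
 */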
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_ARCTURUS:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* for renoir, the common golden settings are not needed */
	default:
		break;
	}

	if (adev->asic_type != CHIP_ARCTURUS)
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

996 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
997                                        bool wc, uint32_t reg, uint32_t val)
998 {
999         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1000         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1001                                 WRITE_DATA_DST_SEL(0) |
1002                                 (wc ? WR_CONFIRM : 0));
1003         amdgpu_ring_write(ring, reg);
1004         amdgpu_ring_write(ring, 0);
1005         amdgpu_ring_write(ring, val);
1006 }
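
     /*
      * Illustrative use (not taken from a real call site): emitting a
      * write-confirmed update of a single register from the ME engine:
      *
      *	gfx_v9_0_write_data_to_reg(ring, 0, true,
      *				   SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0),
      *				   0xDEADBEEF);
      *
      * dst_sel 0 in the packet targets a register; the "wc" flag adds
      * WR_CONFIRM so the write completes before the packet retires.
      */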
1007
1008 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1009                                   int mem_space, int opt, uint32_t addr0,
1010                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1011                                   uint32_t inv)
1012 {
1013         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1014         amdgpu_ring_write(ring,
1015                                  /* memory (1) or register (0) */
1016                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1017                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1018                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1019                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1020
1021         if (mem_space)
1022                 BUG_ON(addr0 & 0x3); /* Dword align */
1023         amdgpu_ring_write(ring, addr0);
1024         amdgpu_ring_write(ring, addr1);
1025         amdgpu_ring_write(ring, ref);
1026         amdgpu_ring_write(ring, mask);
1027         amdgpu_ring_write(ring, inv); /* poll interval */
1028 }
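
     /*
      * Usage sketch (illustrative values): stall the selected engine until a
      * 32-bit fence in memory equals @seq, polling every 0x20 cycles:
      *
      *	gfx_v9_0_wait_reg_mem(ring, 0, 1, 0,
      *			      lower_32_bits(addr), upper_32_bits(addr),
      *			      seq, 0xffffffff, 0x20);
      *
      * With mem_space == 0 the packet compares against a register instead,
      * and addr0/addr1 are interpreted as register offsets.
      */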
1029
1030 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1031 {
1032         struct amdgpu_device *adev = ring->adev;
1033         uint32_t scratch;
1034         uint32_t tmp = 0;
1035         unsigned i;
1036         int r;
1037
1038         r = amdgpu_gfx_scratch_get(adev, &scratch);
1039         if (r)
1040                 return r;
1041
1042         WREG32(scratch, 0xCAFEDEAD);
1043         r = amdgpu_ring_alloc(ring, 3);
1044         if (r)
1045                 goto error_free_scratch;
1046
1047         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1048         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1049         amdgpu_ring_write(ring, 0xDEADBEEF);
1050         amdgpu_ring_commit(ring);
1051
1052         for (i = 0; i < adev->usec_timeout; i++) {
1053                 tmp = RREG32(scratch);
1054                 if (tmp == 0xDEADBEEF)
1055                         break;
1056                 udelay(1);
1057         }
1058
1059         if (i >= adev->usec_timeout)
1060                 r = -ETIMEDOUT;
1061
1062 error_free_scratch:
1063         amdgpu_gfx_scratch_free(adev, scratch);
1064         return r;
1065 }
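
     /*
      * The ring test above is a simple CPU<->CP handshake: the CPU seeds a
      * scratch register with 0xCAFEDEAD, asks the ring to overwrite it with
      * 0xDEADBEEF via SET_UCONFIG_REG, then polls for up to
      * adev->usec_timeout microseconds. Seeing the stale value after the
      * timeout means the CP never consumed the packets, hence -ETIMEDOUT.
      */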
1066
1067 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1068 {
1069         struct amdgpu_device *adev = ring->adev;
1070         struct amdgpu_ib ib;
1071         struct dma_fence *f = NULL;
1072
1073         unsigned index;
1074         uint64_t gpu_addr;
1075         uint32_t tmp;
1076         long r;
1077
1078         r = amdgpu_device_wb_get(adev, &index);
1079         if (r)
1080                 return r;
1081
1082         gpu_addr = adev->wb.gpu_addr + (index * 4);
1083         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1084         memset(&ib, 0, sizeof(ib));
1085         r = amdgpu_ib_get(adev, NULL, 16,
1086                                         AMDGPU_IB_POOL_DIRECT, &ib);
1087         if (r)
1088                 goto err1;
1089
1090         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1091         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1092         ib.ptr[2] = lower_32_bits(gpu_addr);
1093         ib.ptr[3] = upper_32_bits(gpu_addr);
1094         ib.ptr[4] = 0xDEADBEEF;
1095         ib.length_dw = 5;
1096
1097         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1098         if (r)
1099                 goto err2;
1100
1101         r = dma_fence_wait_timeout(f, false, timeout);
1102         if (r == 0) {
1103                 r = -ETIMEDOUT;
1104                 goto err2;
1105         } else if (r < 0) {
1106                 goto err2;
1107         }
1108
1109         tmp = adev->wb.wb[index];
1110         if (tmp == 0xDEADBEEF)
1111                 r = 0;
1112         else
1113                 r = -EINVAL;
1114
1115 err2:
1116         amdgpu_ib_free(adev, &ib, NULL);
1117         dma_fence_put(f);
1118 err1:
1119         amdgpu_device_wb_free(adev, index);
1120         return r;
1121 }
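
     /*
      * The IB test mirrors the ring test but routes the write through an
      * indirect buffer: a 5-dword WRITE_DATA packet (dst_sel 5, a memory
      * destination, with write confirmation) stores 0xDEADBEEF into a
      * writeback slot the CPU seeded with 0xCAFEDEAD. The IB comes from
      * AMDGPU_IB_POOL_DIRECT since this is a direct submission path.
      */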
1122
1123
1124 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1125 {
1126         release_firmware(adev->gfx.pfp_fw);
1127         adev->gfx.pfp_fw = NULL;
1128         release_firmware(adev->gfx.me_fw);
1129         adev->gfx.me_fw = NULL;
1130         release_firmware(adev->gfx.ce_fw);
1131         adev->gfx.ce_fw = NULL;
1132         release_firmware(adev->gfx.rlc_fw);
1133         adev->gfx.rlc_fw = NULL;
1134         release_firmware(adev->gfx.mec_fw);
1135         adev->gfx.mec_fw = NULL;
1136         release_firmware(adev->gfx.mec2_fw);
1137         adev->gfx.mec2_fw = NULL;
1138
1139         kfree(adev->gfx.rlc.register_list_format);
1140 }
1141
1142 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1143 {
1144         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1145
1146         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1147         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1148         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1149         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1150         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1151         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1152         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1153         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1154         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1155         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1156         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1157         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1158         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1159         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1160                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1161 }
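
     /*
      * Note: a v2.1 RLC header carries three save/restore lists beyond the
      * base image (CNTL, GPM and SRM); the parser above records a version,
      * size and payload pointer for each. With PSP-based loading
      * (AMDGPU_FW_LOAD_PSP) they are registered as separate ucode entries in
      * gfx_v9_0_init_rlc_microcode() below.
      */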
1162
1163 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1164 {
1165         adev->gfx.me_fw_write_wait = false;
1166         adev->gfx.mec_fw_write_wait = false;
1167
1168         if ((adev->asic_type != CHIP_ARCTURUS) &&
1169             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1170             (adev->gfx.mec_feature_version < 46) ||
1171             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1172             (adev->gfx.pfp_feature_version < 46)))
1173                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1174
1175         switch (adev->asic_type) {
1176         case CHIP_VEGA10:
1177                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1178                     (adev->gfx.me_feature_version >= 42) &&
1179                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1180                     (adev->gfx.pfp_feature_version >= 42))
1181                         adev->gfx.me_fw_write_wait = true;
1182
1183                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1184                     (adev->gfx.mec_feature_version >= 42))
1185                         adev->gfx.mec_fw_write_wait = true;
1186                 break;
1187         case CHIP_VEGA12:
1188                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1189                     (adev->gfx.me_feature_version >= 44) &&
1190                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1191                     (adev->gfx.pfp_feature_version >= 44))
1192                         adev->gfx.me_fw_write_wait = true;
1193
1194                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1195                     (adev->gfx.mec_feature_version >= 44))
1196                         adev->gfx.mec_fw_write_wait = true;
1197                 break;
1198         case CHIP_VEGA20:
1199                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1200                     (adev->gfx.me_feature_version >= 44) &&
1201                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1202                     (adev->gfx.pfp_feature_version >= 44))
1203                         adev->gfx.me_fw_write_wait = true;
1204
1205                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1206                     (adev->gfx.mec_feature_version >= 44))
1207                         adev->gfx.mec_fw_write_wait = true;
1208                 break;
1209         case CHIP_RAVEN:
1210                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1211                     (adev->gfx.me_feature_version >= 42) &&
1212                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1213                     (adev->gfx.pfp_feature_version >= 42))
1214                         adev->gfx.me_fw_write_wait = true;
1215
1216                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1217                     (adev->gfx.mec_feature_version >= 42))
1218                         adev->gfx.mec_fw_write_wait = true;
1219                 break;
1220         default:
1221                 break;
1222         }
1223 }
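
     /*
      * The me/mec_fw_write_wait flags computed above record whether the CP
      * firmware is new enough to handle a register write plus poll in one
      * WAIT_REG_MEM packet; older firmware needs separate write and wait
      * packets, which is what the DRM_WARN_ONCE above hints at. The per-ASIC
      * thresholds are the first firmware versions carrying that capability.
      */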
1224
1225 struct amdgpu_gfxoff_quirk {
1226         u16 chip_vendor;
1227         u16 chip_device;
1228         u16 subsys_vendor;
1229         u16 subsys_device;
1230         u8 revision;
1231 };
1232
1233 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1234         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1235         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1236         { 0, 0, 0, 0, 0 },
1237 };
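
     /*
      * Quirk entries match on the full PCI identity (vendor, device,
      * subsystem vendor/device and revision) and must be inserted before the
      * all-zero terminator. A hypothetical entry (placeholder IDs, not a
      * real board) would look like:
      *
      *	{ 0x1002, 0x15dd, 0x1043, 0x876b, 0xc8 },
      */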
1238
1239 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1240 {
1241         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1242
1243         while (p && p->chip_device != 0) {
1244                 if (pdev->vendor == p->chip_vendor &&
1245                     pdev->device == p->chip_device &&
1246                     pdev->subsystem_vendor == p->subsys_vendor &&
1247                     pdev->subsystem_device == p->subsys_device &&
1248                     pdev->revision == p->revision) {
1249                         return true;
1250                 }
1251                 ++p;
1252         }
1253         return false;
1254 }
1255
1256 static bool is_raven_kicker(struct amdgpu_device *adev)
1257 {
1258         /* the raven "kicker" SMC firmware starts at version 0x41e2b */
1259         return adev->pm.fw_version >= 0x41e2b;
1262 }
1263
1264 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1265 {
1266         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1267                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1268
1269         switch (adev->asic_type) {
1270         case CHIP_VEGA10:
1271         case CHIP_VEGA12:
1272         case CHIP_VEGA20:
1273                 break;
1274         case CHIP_RAVEN:
1275                 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
1276                     ((!is_raven_kicker(adev) &&
1277                       adev->gfx.rlc_fw_version < 531) ||
1278                      (adev->gfx.rlc_feature_version < 1) ||
1279                      !adev->gfx.rlc.is_rlc_v2_1))
1280                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1281
1282                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1283                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1284                                 AMD_PG_SUPPORT_CP |
1285                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1286                 break;
1287         case CHIP_RENOIR:
1288                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1289                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1290                                 AMD_PG_SUPPORT_CP |
1291                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1292                 break;
1293         default:
1294                 break;
1295         }
1296 }
1297
1298 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1299                                           const char *chip_name)
1300 {
1301         char fw_name[30];
1302         int err;
1303         struct amdgpu_firmware_info *info = NULL;
1304         const struct common_firmware_header *header = NULL;
1305         const struct gfx_firmware_header_v1_0 *cp_hdr;
1306
1307         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1308         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1309         if (err)
1310                 goto out;
1311         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1312         if (err)
1313                 goto out;
1314         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1315         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1316         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1317
1318         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1319         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1320         if (err)
1321                 goto out;
1322         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1323         if (err)
1324                 goto out;
1325         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1326         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1327         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1328
1329         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1330         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1331         if (err)
1332                 goto out;
1333         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1334         if (err)
1335                 goto out;
1336         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1337         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1338         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1339
1340         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1341                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1342                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1343                 info->fw = adev->gfx.pfp_fw;
1344                 header = (const struct common_firmware_header *)info->fw->data;
1345                 adev->firmware.fw_size +=
1346                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1347
1348                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1349                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1350                 info->fw = adev->gfx.me_fw;
1351                 header = (const struct common_firmware_header *)info->fw->data;
1352                 adev->firmware.fw_size +=
1353                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1354
1355                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1356                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1357                 info->fw = adev->gfx.ce_fw;
1358                 header = (const struct common_firmware_header *)info->fw->data;
1359                 adev->firmware.fw_size +=
1360                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1361         }
1362
1363 out:
1364         if (err) {
1365                 dev_err(adev->dev,
1366                         "gfx9: Failed to load firmware \"%s\"\n",
1367                         fw_name);
1368                 release_firmware(adev->gfx.pfp_fw);
1369                 adev->gfx.pfp_fw = NULL;
1370                 release_firmware(adev->gfx.me_fw);
1371                 adev->gfx.me_fw = NULL;
1372                 release_firmware(adev->gfx.ce_fw);
1373                 adev->gfx.ce_fw = NULL;
1374         }
1375         return err;
1376 }
1377
1378 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1379                                           const char *chip_name)
1380 {
1381         char fw_name[30];
1382         int err;
1383         struct amdgpu_firmware_info *info = NULL;
1384         const struct common_firmware_header *header = NULL;
1385         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1386         unsigned int *tmp = NULL;
1387         unsigned int i = 0;
1388         uint16_t version_major;
1389         uint16_t version_minor;
1390         uint32_t smu_version;
1391
1392         /*
1393          * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
1394          * instead of picasso_rlc.bin.
1395          * Detection:
1396          * PCO AM4: revision >= 0xC8 && revision <= 0xCF,
1397          *          or revision >= 0xD8 && revision <= 0xDF;
1398          * otherwise the part is PCO FP5.
1399          */
1400         if (!strcmp(chip_name, "picasso") &&
1401                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1402                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1403                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1404         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1405                 (smu_version >= 0x41e2b))
1406                 /*
1407                  * SMC is loaded by the SBIOS on APUs, so the SMU version can be read directly.
1408                  */
1409                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1410         else
1411                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1412         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1413         if (err)
1414                 goto out;
1415         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
             if (err)
                     goto out;
1416         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1417
1418         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1419         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1420         if (version_major == 2 && version_minor == 1)
1421                 adev->gfx.rlc.is_rlc_v2_1 = true;
1422
1423         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1424         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1425         adev->gfx.rlc.save_and_restore_offset =
1426                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1427         adev->gfx.rlc.clear_state_descriptor_offset =
1428                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1429         adev->gfx.rlc.avail_scratch_ram_locations =
1430                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1431         adev->gfx.rlc.reg_restore_list_size =
1432                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1433         adev->gfx.rlc.reg_list_format_start =
1434                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1435         adev->gfx.rlc.reg_list_format_separate_start =
1436                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1437         adev->gfx.rlc.starting_offsets_start =
1438                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1439         adev->gfx.rlc.reg_list_format_size_bytes =
1440                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1441         adev->gfx.rlc.reg_list_size_bytes =
1442                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1443         adev->gfx.rlc.register_list_format =
1444                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1445                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1446         if (!adev->gfx.rlc.register_list_format) {
1447                 err = -ENOMEM;
1448                 goto out;
1449         }
1450
1451         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1452                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1453         for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1454                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1455
1456         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1457
1458         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1459                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1460         for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1461                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1462
1463         if (adev->gfx.rlc.is_rlc_v2_1)
1464                 gfx_v9_0_init_rlc_ext_microcode(adev);
1465
1466         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1467                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1468                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1469                 info->fw = adev->gfx.rlc_fw;
1470                 header = (const struct common_firmware_header *)info->fw->data;
1471                 adev->firmware.fw_size +=
1472                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1473
1474                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1475                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1476                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1477                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1478                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1479                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1480                         info->fw = adev->gfx.rlc_fw;
1481                         adev->firmware.fw_size +=
1482                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1483
1484                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1485                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1486                         info->fw = adev->gfx.rlc_fw;
1487                         adev->firmware.fw_size +=
1488                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1489
1490                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1491                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1492                         info->fw = adev->gfx.rlc_fw;
1493                         adev->firmware.fw_size +=
1494                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1495                 }
1496         }
1497
1498 out:
1499         if (err) {
1500                 dev_err(adev->dev,
1501                         "gfx9: Failed to load firmware \"%s\"\n",
1502                         fw_name);
1503                 release_firmware(adev->gfx.rlc_fw);
1504                 adev->gfx.rlc_fw = NULL;
1505         }
1506         return err;
1507 }
1508
1509 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1510                                           const char *chip_name)
1511 {
1512         char fw_name[30];
1513         int err;
1514         struct amdgpu_firmware_info *info = NULL;
1515         const struct common_firmware_header *header = NULL;
1516         const struct gfx_firmware_header_v1_0 *cp_hdr;
1517
1518         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1519         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1520         if (err)
1521                 goto out;
1522         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1523         if (err)
1524                 goto out;
1525         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1526         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1527         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1528
1529
1530         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1531         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1532         if (!err) {
1533                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1534                 if (err)
1535                         goto out;
1536                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1537                         adev->gfx.mec2_fw->data;
1538                 adev->gfx.mec2_fw_version =
1539                         le32_to_cpu(cp_hdr->header.ucode_version);
1540                 adev->gfx.mec2_feature_version =
1541                         le32_to_cpu(cp_hdr->ucode_feature_version);
1542         } else {
1543                 err = 0;
1544                 adev->gfx.mec2_fw = NULL;
1545         }
1546
1547         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1548                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1549                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1550                 info->fw = adev->gfx.mec_fw;
1551                 header = (const struct common_firmware_header *)info->fw->data;
1552                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1553                 adev->firmware.fw_size +=
1554                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1555
1556                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1557                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1558                 info->fw = adev->gfx.mec_fw;
1559                 adev->firmware.fw_size +=
1560                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1561
1562                 if (adev->gfx.mec2_fw) {
1563                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1564                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1565                         info->fw = adev->gfx.mec2_fw;
1566                         header = (const struct common_firmware_header *)info->fw->data;
1567                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1568                         adev->firmware.fw_size +=
1569                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1570
1571                         /* TODO: Determine if MEC2 JT FW loading can be
1572                          * removed for all GFX V9 ASICs and above. */
1573                         if (adev->asic_type != CHIP_ARCTURUS &&
1574                             adev->asic_type != CHIP_RENOIR) {
1575                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1576                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1577                                 info->fw = adev->gfx.mec2_fw;
1578                                 adev->firmware.fw_size +=
1579                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1580                                         PAGE_SIZE);
1581                         }
1582                 }
1583         }
1584
1585 out:
1586         gfx_v9_0_check_if_need_gfxoff(adev);
1587         gfx_v9_0_check_fw_write_wait(adev);
1588         if (err) {
1589                 dev_err(adev->dev,
1590                         "gfx9: Failed to load firmware \"%s\"\n",
1591                         fw_name);
1592                 release_firmware(adev->gfx.mec_fw);
1593                 adev->gfx.mec_fw = NULL;
1594                 release_firmware(adev->gfx.mec2_fw);
1595                 adev->gfx.mec2_fw = NULL;
1596         }
1597         return err;
1598 }
1599
1600 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1601 {
1602         const char *chip_name;
1603         int r;
1604
1605         DRM_DEBUG("\n");
1606
1607         switch (adev->asic_type) {
1608         case CHIP_VEGA10:
1609                 chip_name = "vega10";
1610                 break;
1611         case CHIP_VEGA12:
1612                 chip_name = "vega12";
1613                 break;
1614         case CHIP_VEGA20:
1615                 chip_name = "vega20";
1616                 break;
1617         case CHIP_RAVEN:
1618                 if (adev->rev_id >= 8)
1619                         chip_name = "raven2";
1620                 else if (adev->pdev->device == 0x15d8)
1621                         chip_name = "picasso";
1622                 else
1623                         chip_name = "raven";
1624                 break;
1625         case CHIP_ARCTURUS:
1626                 chip_name = "arcturus";
1627                 break;
1628         case CHIP_RENOIR:
1629                 chip_name = "renoir";
1630                 break;
1631         default:
1632                 BUG();
1633         }
1634
1635         /* No CPG in Arcturus */
1636         if (adev->asic_type != CHIP_ARCTURUS) {
1637                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1638                 if (r)
1639                         return r;
1640         }
1641
1642         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1643         if (r)
1644                 return r;
1645
1646         r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1647         if (r)
1648                 return r;
1649
1650         return r;
1651 }
1652
1653 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1654 {
1655         u32 count = 0;
1656         const struct cs_section_def *sect = NULL;
1657         const struct cs_extent_def *ext = NULL;
1658
1659         /* begin clear state */
1660         count += 2;
1661         /* context control state */
1662         count += 3;
1663
1664         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1665                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1666                         if (sect->id == SECT_CONTEXT)
1667                                 count += 2 + ext->reg_count;
1668                         else
1669                                 return 0;
1670                 }
1671         }
1672
1673         /* end clear state */
1674         count += 2;
1675         /* clear state */
1676         count += 2;
1677
1678         return count;
1679 }
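
     /*
      * Equivalently, the clear-state buffer size in dwords is:
      *
      *	count = 2                       (begin clear state)
      *	      + 3                       (context control)
      *	      + sum(2 + ext->reg_count) (per SECT_CONTEXT extent)
      *	      + 2                       (end clear state)
      *	      + 2                       (clear state)
      *
      * which must match the stream built by gfx_v9_0_get_csb_buffer() below.
      */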
1680
1681 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1682                                     volatile u32 *buffer)
1683 {
1684         u32 count = 0, i;
1685         const struct cs_section_def *sect = NULL;
1686         const struct cs_extent_def *ext = NULL;
1687
1688         if (adev->gfx.rlc.cs_data == NULL)
1689                 return;
1690         if (buffer == NULL)
1691                 return;
1692
1693         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1694         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1695
1696         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1697         buffer[count++] = cpu_to_le32(0x80000000);
1698         buffer[count++] = cpu_to_le32(0x80000000);
1699
1700         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1701                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1702                         if (sect->id == SECT_CONTEXT) {
1703                                 buffer[count++] =
1704                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1705                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1706                                                 PACKET3_SET_CONTEXT_REG_START);
1707                                 for (i = 0; i < ext->reg_count; i++)
1708                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1709                         } else {
1710                                 return;
1711                         }
1712                 }
1713         }
1714
1715         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1716         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1717
1718         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1719         buffer[count++] = cpu_to_le32(0);
1720 }
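
     /*
      * The resulting PM4 stream is:
      *   PREAMBLE_CNTL(BEGIN_CLEAR_STATE),
      *   CONTEXT_CONTROL(0x80000000, 0x80000000),
      *   one SET_CONTEXT_REG burst per SECT_CONTEXT extent,
      *   PREAMBLE_CNTL(END_CLEAR_STATE),
      *   CLEAR_STATE(0),
      * and its dword count is exactly what gfx_v9_0_get_csb_size() returns.
      */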
1721
1722 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1723 {
1724         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1725         uint32_t pg_always_on_cu_num = 2;
1726         uint32_t always_on_cu_num;
1727         uint32_t i, j, k;
1728         uint32_t mask, cu_bitmap, counter;
1729
1730         if (adev->flags & AMD_IS_APU)
1731                 always_on_cu_num = 4;
1732         else if (adev->asic_type == CHIP_VEGA12)
1733                 always_on_cu_num = 8;
1734         else
1735                 always_on_cu_num = 12;
1736
1737         mutex_lock(&adev->grbm_idx_mutex);
1738         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1739                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1740                         mask = 1;
1741                         cu_bitmap = 0;
1742                         counter = 0;
1743                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1744
1745                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1746                                 if (cu_info->bitmap[i][j] & mask) {
1747                                         if (counter == pg_always_on_cu_num)
1748                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1749                                         if (counter < always_on_cu_num)
1750                                                 cu_bitmap |= mask;
1751                                         else
1752                                                 break;
1753                                         counter++;
1754                                 }
1755                                 mask <<= 1;
1756                         }
1757
1758                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1759                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1760                 }
1761         }
1762         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1763         mutex_unlock(&adev->grbm_idx_mutex);
1764 }
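
     /*
      * Worked example of the loop above: for an SH whose CU bitmap is 0xff
      * with always_on_cu_num == 4, the first four active CUs accumulate into
      * cu_bitmap, so RLC_LB_ALWAYS_ACTIVE_CU_MASK gets 0xf, while
      * RLC_PG_ALWAYS_ON_CU_MASK is latched when the counter reaches
      * pg_always_on_cu_num == 2 and therefore holds only the first two CUs
      * (0x3).
      */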
1765
1766 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1767 {
1768         uint32_t data;
1769
1770         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1771         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1772         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1773         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1774         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1775
1776         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1777         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1778
1779         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1780         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1781
1782         mutex_lock(&adev->grbm_idx_mutex);
1783         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1784         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1785         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1786
1787         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1788         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1789         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1790         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1791         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1792
1793         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1794         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1795         data &= 0x0000FFFF;
1796         data |= 0x00C00000;
1797         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1798
1799         /*
1800          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1801          * programmed in gfx_v9_0_init_always_on_cu_mask()
1802          */
1803
1804         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1805          * but is used here as part of the RLC_LB_CNTL configuration */
1806         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1807         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1808         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1809         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1810         mutex_unlock(&adev->grbm_idx_mutex);
1811
1812         gfx_v9_0_init_always_on_cu_mask(adev);
1813 }
1814
1815 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1816 {
1817         uint32_t data;
1818
1819         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1820         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1821         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1822         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1823         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1824
1825         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1826         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1827
1828         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1829         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1830
1831         mutex_lock(&adev->grbm_idx_mutex);
1832         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1833         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1834         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1835
1836         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1837         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1838         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1839         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1840         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1841
1842         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1843         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1844         data &= 0x0000FFFF;
1845         data |= 0x00C00000;
1846         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1847
1848         /*
1849          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1850          * programmed in gfx_v9_0_init_always_on_cu_mask()
1851          */
1852
1853         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1854          * but is used here as part of the RLC_LB_CNTL configuration */
1855         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1856         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1857         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1858         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1859         mutex_unlock(&adev->grbm_idx_mutex);
1860
1861         gfx_v9_0_init_always_on_cu_mask(adev);
1862 }
1863
1864 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1865 {
1866         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1867 }
1868
1869 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1870 {
1871         return 5;
1872 }
1873
1874 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1875 {
1876         const struct cs_section_def *cs_data;
1877         int r;
1878
1879         adev->gfx.rlc.cs_data = gfx9_cs_data;
1880
1881         cs_data = adev->gfx.rlc.cs_data;
1882
1883         if (cs_data) {
1884                 /* init clear state block */
1885                 r = amdgpu_gfx_rlc_init_csb(adev);
1886                 if (r)
1887                         return r;
1888         }
1889
1890         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
1891                 /* TODO: double check the cp_table_size for RV */
1892                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1893                 r = amdgpu_gfx_rlc_init_cpt(adev);
1894                 if (r)
1895                         return r;
1896         }
1897
1898         switch (adev->asic_type) {
1899         case CHIP_RAVEN:
1900                 gfx_v9_0_init_lbpw(adev);
1901                 break;
1902         case CHIP_VEGA20:
1903                 gfx_v9_4_init_lbpw(adev);
1904                 break;
1905         default:
1906                 break;
1907         }
1908
1909         /* init spm vmid with 0xf */
1910         if (adev->gfx.rlc.funcs->update_spm_vmid)
1911                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1912
1913         return 0;
1914 }
1915
1916 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1917 {
1918         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1919         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1920 }
1921
1922 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1923 {
1924         int r;
1925         u32 *hpd;
1926         const __le32 *fw_data;
1927         unsigned fw_size;
1928         u32 *fw;
1929         size_t mec_hpd_size;
1930
1931         const struct gfx_firmware_header_v1_0 *mec_hdr;
1932
1933         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1934
1935         /* take ownership of the relevant compute queues */
1936         amdgpu_gfx_compute_queue_acquire(adev);
1937         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1938
1939         r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1940                                       AMDGPU_GEM_DOMAIN_VRAM,
1941                                       &adev->gfx.mec.hpd_eop_obj,
1942                                       &adev->gfx.mec.hpd_eop_gpu_addr,
1943                                       (void **)&hpd);
1944         if (r) {
1945                 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1946                 gfx_v9_0_mec_fini(adev);
1947                 return r;
1948         }
1949
1950         memset(hpd, 0, mec_hpd_size);
1951
1952         amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1953         amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1954
1955         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1956
1957         fw_data = (const __le32 *)
1958                 (adev->gfx.mec_fw->data +
1959                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1960         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
1961
1962         r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1963                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1964                                       &adev->gfx.mec.mec_fw_obj,
1965                                       &adev->gfx.mec.mec_fw_gpu_addr,
1966                                       (void **)&fw);
1967         if (r) {
1968                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1969                 gfx_v9_0_mec_fini(adev);
1970                 return r;
1971         }
1972
1973         memcpy(fw, fw_data, fw_size);
1974
1975         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1976         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1977
1978         return 0;
1979 }
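
     /*
      * Sizing note: the EOP buffer above is one GFX9_MEC_HPD_SIZE (4 KiB)
      * slot per acquired compute ring, so e.g. eight compute rings need a
      * 32 KiB VRAM allocation. The MEC firmware BO is a separate GTT
      * allocation sized from the ucode header.
      */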
1980
1981 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1982 {
1983         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1984                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1985                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1986                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1987                 (SQ_IND_INDEX__FORCE_READ_MASK));
1988         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1989 }
1990
1991 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1992                            uint32_t wave, uint32_t thread,
1993                            uint32_t regno, uint32_t num, uint32_t *out)
1994 {
1995         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1996                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1997                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1998                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1999                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2000                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2001                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2002         while (num--)
2003                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2004 }
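
     /*
      * Both helpers use the SQ indirect register interface: SQ_IND_INDEX
      * selects the wave/SIMD and a starting register index, and every read
      * of SQ_IND_DATA returns one dword. With AUTO_INCR set, as in
      * wave_read_regs(), the index advances on each read, so e.g. 16
      * consecutive SGPRs come back from one index write followed by 16 data
      * reads.
      */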
2005
2006 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2007 {
2008         /* type 1 wave data */
2009         dst[(*no_fields)++] = 1;
2010         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2011         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2012         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2013         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2014         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2015         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2016         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2017         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2018         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2019         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2020         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2021         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2022         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2023         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2024 }
2025
2026 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2027                                      uint32_t wave, uint32_t start,
2028                                      uint32_t size, uint32_t *dst)
2029 {
2030         wave_read_regs(
2031                 adev, simd, wave, 0,
2032                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2033 }
2034
2035 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2036                                      uint32_t wave, uint32_t thread,
2037                                      uint32_t start, uint32_t size,
2038                                      uint32_t *dst)
2039 {
2040         wave_read_regs(
2041                 adev, simd, wave, thread,
2042                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2043 }
2044
2045 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2046                                   u32 me, u32 pipe, u32 q, u32 vm)
2047 {
2048         soc15_grbm_select(adev, me, pipe, q, vm);
2049 }
2050
2051 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2052         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2053         .select_se_sh = &gfx_v9_0_select_se_sh,
2054         .read_wave_data = &gfx_v9_0_read_wave_data,
2055         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2056         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2057         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2058         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2059         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2060         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2061 };
2062
2063 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2064         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2065         .select_se_sh = &gfx_v9_0_select_se_sh,
2066         .read_wave_data = &gfx_v9_0_read_wave_data,
2067         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2068         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2069         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2070         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2071         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2072         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2073 };
2074
2075 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2076 {
2077         u32 gb_addr_config;
2078         int err;
2079
2080         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2081
2082         switch (adev->asic_type) {
2083         case CHIP_VEGA10:
2084                 adev->gfx.config.max_hw_contexts = 8;
2085                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2086                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2087                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2088                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2089                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2090                 break;
2091         case CHIP_VEGA12:
2092                 adev->gfx.config.max_hw_contexts = 8;
2093                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2094                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2095                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2096                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2097                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2098                 DRM_INFO("fix gfx.config for vega12\n");
2099                 break;
2100         case CHIP_VEGA20:
2101                 adev->gfx.config.max_hw_contexts = 8;
2102                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2103                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2104                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2105                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2106                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2107                 gb_addr_config &= ~0xf3e777ff;
2108                 gb_addr_config |= 0x22014042;
2109                 /* check vbios table if gpu info is not available */
2110                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2111                 if (err)
2112                         return err;
2113                 break;
2114         case CHIP_RAVEN:
2115                 adev->gfx.config.max_hw_contexts = 8;
2116                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2117                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2118                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2119                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2120                 if (adev->rev_id >= 8)
2121                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2122                 else
2123                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2124                 break;
2125         case CHIP_ARCTURUS:
2126                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2127                 adev->gfx.config.max_hw_contexts = 8;
2128                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2129                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2130                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2131                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2132                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2133                 gb_addr_config &= ~0xf3e777ff;
2134                 gb_addr_config |= 0x22014042;
2135                 break;
2136         case CHIP_RENOIR:
2137                 adev->gfx.config.max_hw_contexts = 8;
2138                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2139                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2140                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2141                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2142                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2143                 gb_addr_config &= ~0xf3e777ff;
2144                 gb_addr_config |= 0x22010042;
2145                 break;
2146         default:
2147                 BUG();
2148                 break;
2149         }
2150
2151         adev->gfx.config.gb_addr_config = gb_addr_config;
2152
2153         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2154                         REG_GET_FIELD(
2155                                         adev->gfx.config.gb_addr_config,
2156                                         GB_ADDR_CONFIG,
2157                                         NUM_PIPES);
2158
2159         adev->gfx.config.max_tile_pipes =
2160                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2161
2162         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2163                         REG_GET_FIELD(
2164                                         adev->gfx.config.gb_addr_config,
2165                                         GB_ADDR_CONFIG,
2166                                         NUM_BANKS);
2167         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2168                         REG_GET_FIELD(
2169                                         adev->gfx.config.gb_addr_config,
2170                                         GB_ADDR_CONFIG,
2171                                         MAX_COMPRESSED_FRAGS);
2172         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2173                         REG_GET_FIELD(
2174                                         adev->gfx.config.gb_addr_config,
2175                                         GB_ADDR_CONFIG,
2176                                         NUM_RB_PER_SE);
2177         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2178                         REG_GET_FIELD(
2179                                         adev->gfx.config.gb_addr_config,
2180                                         GB_ADDR_CONFIG,
2181                                         NUM_SHADER_ENGINES);
2182         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2183                         REG_GET_FIELD(
2184                                         adev->gfx.config.gb_addr_config,
2185                                         GB_ADDR_CONFIG,
2186                                         PIPE_INTERLEAVE_SIZE));
2187
2188         return 0;
2189 }
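
     /*
      * Decode example for the unpacking above: every GB_ADDR_CONFIG field is
      * a log2-encoded count, so NUM_PIPES == 2 yields num_pipes = 1 << 2 = 4,
      * and PIPE_INTERLEAVE_SIZE == 0 decodes to 1 << (8 + 0) = 256 bytes.
      */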
2190
2191 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2192                                       int mec, int pipe, int queue)
2193 {
2194         int r;
2195         unsigned irq_type;
2196         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2197
2199
2200         /* mec0 is me1 */
2201         ring->me = mec + 1;
2202         ring->pipe = pipe;
2203         ring->queue = queue;
2204
2205         ring->ring_obj = NULL;
2206         ring->use_doorbell = true;
2207         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2208         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2209                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2210         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2211
2212         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2213                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2214                 + ring->pipe;
2215
2216         /* type-2 packets are deprecated on MEC, use type-3 instead */
2217         r = amdgpu_ring_init(adev, ring, 1024,
2218                              &adev->gfx.eop_irq, irq_type);
2219         if (r)
2220                 return r;
2221
2223         return 0;
2224 }
2225
2226 static int gfx_v9_0_sw_init(void *handle)
2227 {
2228         int i, j, k, r, ring_id;
2229         struct amdgpu_ring *ring;
2230         struct amdgpu_kiq *kiq;
2231         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2232
2233         switch (adev->asic_type) {
2234         case CHIP_VEGA10:
2235         case CHIP_VEGA12:
2236         case CHIP_VEGA20:
2237         case CHIP_RAVEN:
2238         case CHIP_ARCTURUS:
2239         case CHIP_RENOIR:
2240                 adev->gfx.mec.num_mec = 2;
2241                 break;
2242         default:
2243                 adev->gfx.mec.num_mec = 1;
2244                 break;
2245         }
2246
2247         adev->gfx.mec.num_pipe_per_mec = 4;
2248         adev->gfx.mec.num_queue_per_pipe = 8;
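             /*
              * With the fixed 4 pipes x 8 queues layout above, a 2-MEC
              * part exposes 64 hardware compute queues; only those for
              * which amdgpu_gfx_is_mec_queue_enabled() returns true get
              * a ring in the loop further down.
              */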
2249
2250         /* EOP Event */
2251         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2252         if (r)
2253                 return r;
2254
2255         /* Privileged reg */
2256         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2257                               &adev->gfx.priv_reg_irq);
2258         if (r)
2259                 return r;
2260
2261         /* Privileged inst */
2262         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2263                               &adev->gfx.priv_inst_irq);
2264         if (r)
2265                 return r;
2266
2267         /* ECC error */
2268         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2269                               &adev->gfx.cp_ecc_error_irq);
2270         if (r)
2271                 return r;
2272
2273         /* FUE error */
2274         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2275                               &adev->gfx.cp_ecc_error_irq);
2276         if (r)
2277                 return r;
2278
2279         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2280
2281         gfx_v9_0_scratch_init(adev);
2282
2283         r = gfx_v9_0_init_microcode(adev);
2284         if (r) {
2285                 DRM_ERROR("Failed to load gfx firmware!\n");
2286                 return r;
2287         }
2288
2289         r = adev->gfx.rlc.funcs->init(adev);
2290         if (r) {
2291                 DRM_ERROR("Failed to init rlc BOs!\n");
2292                 return r;
2293         }
2294
2295         r = gfx_v9_0_mec_init(adev);
2296         if (r) {
2297                 DRM_ERROR("Failed to init MEC BOs!\n");
2298                 return r;
2299         }
2300
2301         /* set up the gfx ring */
2302         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2303                 ring = &adev->gfx.gfx_ring[i];
2304                 ring->ring_obj = NULL;
2305                 if (!i)
2306                         sprintf(ring->name, "gfx");
2307                 else
2308                         sprintf(ring->name, "gfx_%d", i);
2309                 ring->use_doorbell = true;
2310                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2311                 r = amdgpu_ring_init(adev, ring, 1024,
2312                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
2313                 if (r)
2314                         return r;
2315         }
2316
2317         /* set up the compute queues - allocate horizontally across pipes */
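             /*
              * The queue index (j) is iterated in the middle loop and the
              * pipe index (k) innermost, so consecutive ring_ids land on
              * different pipes first; that is what "horizontally" means
              * here.
              */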
2318         ring_id = 0;
2319         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2320                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2321                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2322                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2323                                         continue;
2324
2325                                 r = gfx_v9_0_compute_ring_init(adev,
2326                                                                ring_id,
2327                                                                i, k, j);
2328                                 if (r)
2329                                         return r;
2330
2331                                 ring_id++;
2332                         }
2333                 }
2334         }
2335
2336         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2337         if (r) {
2338                 DRM_ERROR("Failed to init KIQ BOs!\n");
2339                 return r;
2340         }
2341
2342         kiq = &adev->gfx.kiq;
2343         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2344         if (r)
2345                 return r;
2346
2347         /* create MQD for all compute queues as well as KIQ for SRIOV case */
2348         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2349         if (r)
2350                 return r;
2351
2352         adev->gfx.ce_ram_size = 0x8000;
2353
2354         r = gfx_v9_0_gpu_early_init(adev);
2355         if (r)
2356                 return r;
2357
2358         return 0;
2359 }
2361
2362 static int gfx_v9_0_sw_fini(void *handle)
2363 {
2364         int i;
2365         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2366
2367         amdgpu_gfx_ras_fini(adev);
2368
2369         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2370                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2371         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2372                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2373
2374         amdgpu_gfx_mqd_sw_fini(adev);
2375         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2376         amdgpu_gfx_kiq_fini(adev);
2377
2378         gfx_v9_0_mec_fini(adev);
2379         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2380         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
2381                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2382                                 &adev->gfx.rlc.cp_table_gpu_addr,
2383                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2384         }
2385         gfx_v9_0_free_microcode(adev);
2386
2387         return 0;
2388 }
2390
2391 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2392 {
2393         /* TODO */
2394 }
2395
2396 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2397 {
2398         u32 data;
2399
2400         if (instance == 0xffffffff)
2401                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2402         else
2403                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2404
2405         if (se_num == 0xffffffff)
2406                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2407         else
2408                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2409
2410         if (sh_num == 0xffffffff)
2411                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2412         else
2413                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2414
2415         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2416 }
2417
2418 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2419 {
2420         u32 data, mask;
2421
2422         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2423         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2424
2425         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2426         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2427
2428         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2429                                          adev->gfx.config.max_sh_per_se);
2430
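             /* the registers above hold per-RB disable bits; invert and
              * mask down to this SH's RBs to get the active-RB bitmap */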
2431         return (~data) & mask;
2432 }
2433
2434 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2435 {
2436         int i, j;
2437         u32 data;
2438         u32 active_rbs = 0;
2439         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2440                                         adev->gfx.config.max_sh_per_se;
2441
2442         mutex_lock(&adev->grbm_idx_mutex);
2443         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2444                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2445                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2446                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2447                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2448                                                rb_bitmap_width_per_sh);
2449                 }
2450         }
2451         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2452         mutex_unlock(&adev->grbm_idx_mutex);
2453
2454         adev->gfx.config.backend_enable_mask = active_rbs;
2455         adev->gfx.config.num_rbs = hweight32(active_rbs);
2456 }
2457
2458 #define DEFAULT_SH_MEM_BASES    (0x6000)
2459 #define FIRST_COMPUTE_VMID      (8)
2460 #define LAST_COMPUTE_VMID       (16)
2461 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2462 {
2463         int i;
2464         uint32_t sh_mem_config;
2465         uint32_t sh_mem_bases;
2466
2467         /*
2468          * Configure apertures:
2469          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2470          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2471          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2472          */
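             /*
              * SH_MEM_BASES packs PRIVATE_BASE into its low 16 bits and
              * SHARED_BASE into its high 16 bits (cf. the REG_SET_FIELD
              * calls in gfx_v9_0_constants_init()), so this places both
              * apertures at the 0x6000-prefixed ranges listed above.
              */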
2473         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2474
2475         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2476                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2477                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2478
2479         mutex_lock(&adev->srbm_mutex);
2480         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2481                 soc15_grbm_select(adev, 0, 0, 0, i);
2482                 /* CP and shaders */
2483                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2484                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2485         }
2486         soc15_grbm_select(adev, 0, 0, 0, 0);
2487         mutex_unlock(&adev->srbm_mutex);
2488
2489         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2490            access. These should be enabled by FW for target VMIDs. */
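             /* GDS_VMID0_BASE/SIZE are consecutive per-VMID register
              * pairs, hence the 2 * i stride; GWS and OA use a single
              * register per VMID */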
2491         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2492                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2493                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2494                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2495                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2496         }
2497 }
2498
2499 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2500 {
2501         int vmid;
2502
2503         /*
2504          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2505          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2506          * the driver can enable them for graphics. VMID0 should maintain
2507          * access so that HWS firmware can save/restore entries.
2508          */
2509         for (vmid = 1; vmid < 16; vmid++) {
2510                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2511                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2512                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2513                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2514         }
2515 }
2516
2517 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2518 {
2519         uint32_t tmp;
2520
2521         switch (adev->asic_type) {
2522         case CHIP_ARCTURUS:
2523                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2524                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2525                                         DISABLE_BARRIER_WAITCNT, 1);
2526                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2527                 break;
2528         default:
2529                 break;
2530         }
2531 }
2532
2533 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2534 {
2535         u32 tmp;
2536         int i;
2537
2538         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2539
2540         gfx_v9_0_tiling_mode_table_init(adev);
2541
2542         gfx_v9_0_setup_rb(adev);
2543         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2544         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2545
2546         /* XXX SH_MEM regs */
2547         /* where to put LDS, scratch, GPUVM in FSA64 space */
2548         mutex_lock(&adev->srbm_mutex);
2549         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2550                 soc15_grbm_select(adev, 0, 0, 0, i);
2551                 /* CP and shaders */
2552                 if (i == 0) {
2553                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2554                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2555                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2556                                             !!amdgpu_noretry);
2557                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2558                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2559                 } else {
2560                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2561                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2562                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2563                                             !!amdgpu_noretry);
2564                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
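                             /* SH_MEM_BASES holds only the top 16 bits of
                              * each 64-bit aperture address, hence the
                              * >> 48 shifts below */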
2565                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2566                                 (adev->gmc.private_aperture_start >> 48));
2567                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2568                                 (adev->gmc.shared_aperture_start >> 48));
2569                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2570                 }
2571         }
2572         soc15_grbm_select(adev, 0, 0, 0, 0);
2573
2574         mutex_unlock(&adev->srbm_mutex);
2575
2576         gfx_v9_0_init_compute_vmid(adev);
2577         gfx_v9_0_init_gds_vmid(adev);
2578         gfx_v9_0_init_sq_config(adev);
2579 }
2580
2581 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2582 {
2583         u32 i, j, k;
2584         u32 mask;
2585
2586         mutex_lock(&adev->grbm_idx_mutex);
2587         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2588                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2589                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2590                         for (k = 0; k < adev->usec_timeout; k++) {
2591                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2592                                         break;
2593                                 udelay(1);
2594                         }
2595                         if (k == adev->usec_timeout) {
2596                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2597                                                       0xffffffff, 0xffffffff);
2598                                 mutex_unlock(&adev->grbm_idx_mutex);
2599                                 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2600                                          i, j);
2601                                 return;
2602                         }
2603                 }
2604         }
2605         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2606         mutex_unlock(&adev->grbm_idx_mutex);
2607
2608         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2609                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2610                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2611                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2612         for (k = 0; k < adev->usec_timeout; k++) {
2613                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2614                         break;
2615                 udelay(1);
2616         }
2617 }
2618
2619 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2620                                                bool enable)
2621 {
2622         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2623
2624         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2625         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2626         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2627         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2628
2629         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2630 }
2631
2632 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2633 {
2634         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2635         /* csib */
2636         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2637                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2638         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2639                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2640         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2641                         adev->gfx.rlc.clear_state_size);
2642 }
2643
2644 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2645                                 int indirect_offset,
2646                                 int list_size,
2647                                 int *unique_indirect_regs,
2648                                 int unique_indirect_reg_count,
2649                                 int *indirect_start_offsets,
2650                                 int *indirect_start_offsets_count,
2651                                 int max_start_offsets_count)
2652 {
2653         int idx;
2654
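             /*
              * Walk the indirect portion of the list: each sub-list is
              * terminated by an 0xFFFFFFFF marker and made of 3-dword
              * entries whose last dword names the indirect register,
              * which gets collected into unique_indirect_regs.
              */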
2655         for (; indirect_offset < list_size; indirect_offset++) {
2656                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2657                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2658                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2659
2660                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2661                         indirect_offset += 2;
2662
2663                         /* look for the matching index */
2664                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2665                                 if (unique_indirect_regs[idx] ==
2666                                         register_list_format[indirect_offset] ||
2667                                         !unique_indirect_regs[idx])
2668                                         break;
2669                         }
2670
2671                         BUG_ON(idx >= unique_indirect_reg_count);
2672
2673                         if (!unique_indirect_regs[idx])
2674                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2675
2676                         indirect_offset++;
2677                 }
2678         }
2679 }
2680
2681 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2682 {
2683         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2684         int unique_indirect_reg_count = 0;
2685
2686         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2687         int indirect_start_offsets_count = 0;
2688
2689         int list_size = 0;
2690         int i = 0, j = 0;
2691         u32 tmp = 0;
2692
2693         u32 *register_list_format =
2694                 kmemdup(adev->gfx.rlc.register_list_format,
2695                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2696         if (!register_list_format)
2697                 return -ENOMEM;
2698
2699         /* setup unique_indirect_regs array and indirect_start_offsets array */
2700         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2701         gfx_v9_1_parse_ind_reg_list(register_list_format,
2702                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2703                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2704                                     unique_indirect_regs,
2705                                     unique_indirect_reg_count,
2706                                     indirect_start_offsets,
2707                                     &indirect_start_offsets_count,
2708                                     ARRAY_SIZE(indirect_start_offsets));
2709
2710         /* enable auto-increment in case it is disabled */
2711         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2712         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2713         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2714
2715         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2716         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2717                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2718         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2719                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2720                         adev->gfx.rlc.register_restore[i]);
2721
2722         /* load indirect register */
2723         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2724                 adev->gfx.rlc.reg_list_format_start);
2725
2726         /* direct register portion */
2727         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2728                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2729                         register_list_format[i]);
2730
2731         /* indirect register portion */
2732         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2733                 if (register_list_format[i] == 0xFFFFFFFF) {
2734                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2735                         continue;
2736                 }
2737
2738                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2739                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2740
2741                 for (j = 0; j < unique_indirect_reg_count; j++) {
2742                         if (register_list_format[i] == unique_indirect_regs[j]) {
2743                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2744                                 break;
2745                         }
2746                 }
2747
2748                 BUG_ON(j >= unique_indirect_reg_count);
2749
2750                 i++;
2751         }
2752
2753         /* set save/restore list size */
2754         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2755         list_size = list_size >> 1;
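             /* the dword count is halved here, presumably because the
              * save/restore list is counted in (reg, value) pairs */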
2756         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2757                 adev->gfx.rlc.reg_restore_list_size);
2758         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2759
2760         /* write the starting offsets to RLC scratch ram */
2761         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2762                 adev->gfx.rlc.starting_offsets_start);
2763         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2764                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2765                        indirect_start_offsets[i]);
2766
2767         /* load unique indirect regs */
2768         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2769                 if (unique_indirect_regs[i] != 0) {
2770                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2771                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2772                                unique_indirect_regs[i] & 0x3FFFF);
2773
2774                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2775                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2776                                unique_indirect_regs[i] >> 20);
2777                 }
2778         }
2779
2780         kfree(register_list_format);
2781         return 0;
2782 }
2783
2784 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2785 {
2786         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2787 }
2788
2789 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2790                                              bool enable)
2791 {
2792         uint32_t data = 0;
2793         uint32_t default_data = 0;
2794
2795         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2796         if (enable) {
2797                 /* enable GFXIP control over CGPG */
2798                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2799                 if (default_data != data)
2800                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2801
2802                 /* update status */
2803                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2804                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2805                 if (default_data != data)
2806                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2807         } else {
2808                 /* restore GFXIP control over CGPG */
2809                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2810                 if (default_data != data)
2811                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2812         }
2813 }
2814
2815 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2816 {
2817         uint32_t data = 0;
2818
2819         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2820                               AMD_PG_SUPPORT_GFX_SMG |
2821                               AMD_PG_SUPPORT_GFX_DMG)) {
2822                 /* init IDLE_POLL_COUNT = 0x60 */
2823                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2824                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2825                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2826                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2827
2828                 /* init RLC PG Delay */
2829                 data = 0;
2830                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2831                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2832                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2833                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2834                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2835
2836                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2837                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2838                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2839                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2840
2841                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2842                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2843                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2844                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2845
2846                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2847                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2848
2849                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2850                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2851                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2852
2853                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2854         }
2855 }
2856
2857 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2858                                                 bool enable)
2859 {
2860         uint32_t data = 0;
2861         uint32_t default_data = 0;
2862
2863         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2864         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2865                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2866                              enable ? 1 : 0);
2867         if (default_data != data)
2868                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2869 }
2870
2871 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2872                                                 bool enable)
2873 {
2874         uint32_t data = 0;
2875         uint32_t default_data = 0;
2876
2877         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2878         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2879                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2880                              enable ? 1 : 0);
2881         if (default_data != data)
2882                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2883 }
2884
2885 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2886                                         bool enable)
2887 {
2888         uint32_t data = 0;
2889         uint32_t default_data = 0;
2890
2891         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2892         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2893                              CP_PG_DISABLE,
2894                              enable ? 0 : 1);
2895         if (default_data != data)
2896                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2897 }
2898
2899 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2900                                                 bool enable)
2901 {
2902         uint32_t data, default_data;
2903
2904         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2905         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2906                              GFX_POWER_GATING_ENABLE,
2907                              enable ? 1 : 0);
2908         if (default_data != data)
2909                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2910 }
2911
2912 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2913                                                 bool enable)
2914 {
2915         uint32_t data, default_data;
2916
2917         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2918         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2919                              GFX_PIPELINE_PG_ENABLE,
2920                              enable ? 1 : 0);
2921         if (default_data != data)
2922                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2923
2924         if (!enable)
2925                 /* read any GFX register to wake up GFX */
2926                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2927 }
2928
2929 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2930                                                        bool enable)
2931 {
2932         uint32_t data, default_data;
2933
2934         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2935         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2936                              STATIC_PER_CU_PG_ENABLE,
2937                              enable ? 1 : 0);
2938         if (default_data != data)
2939                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2940 }
2941
2942 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2943                                                 bool enable)
2944 {
2945         uint32_t data, default_data;
2946
2947         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2948         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2949                              DYN_PER_CU_PG_ENABLE,
2950                              enable ? 1 : 0);
2951         if (default_data != data)
2952                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2953 }
2954
2955 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2956 {
2957         gfx_v9_0_init_csb(adev);
2958
2959         /*
2960          * The RLC save/restore list is supported since RLC v2_1,
2961          * and it's needed by the gfxoff feature.
2962          */
2963         if (adev->gfx.rlc.is_rlc_v2_1) {
2964                 if (adev->asic_type == CHIP_VEGA12 ||
2965                     (adev->asic_type == CHIP_RAVEN &&
2966                      adev->rev_id >= 8))
2967                         gfx_v9_1_init_rlc_save_restore_list(adev);
2968                 gfx_v9_0_enable_save_restore_machine(adev);
2969         }
2970
2971         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2972                               AMD_PG_SUPPORT_GFX_SMG |
2973                               AMD_PG_SUPPORT_GFX_DMG |
2974                               AMD_PG_SUPPORT_CP |
2975                               AMD_PG_SUPPORT_GDS |
2976                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2977                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2978                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
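                     /* the jump table address above is programmed in
                      * 256-byte units, hence the >> 8 */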
2979                 gfx_v9_0_init_gfx_power_gating(adev);
2980         }
2981 }
2982
2983 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2984 {
2985         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2986         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2987         gfx_v9_0_wait_for_rlc_serdes(adev);
2988 }
2989
2990 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2991 {
2992         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2993         udelay(50);
2994         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2995         udelay(50);
2996 }
2997
2998 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2999 {
3000 #ifdef AMDGPU_RLC_DEBUG_RETRY
3001         u32 rlc_ucode_ver;
3002 #endif
3003
3004         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3005         udelay(50);
3006
3007         /* carrizo enables the cp interrupt only after the cp is initialized */
3008         if (!(adev->flags & AMD_IS_APU)) {
3009                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3010                 udelay(50);
3011         }
3012
3013 #ifdef AMDGPU_RLC_DEBUG_RETRY
3014         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3015         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3016         if (rlc_ucode_ver == 0x108) {
3017                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3018                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3019                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3020                  * default is 0x9C4 to create a 100us interval */
3021                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3022                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3023                  * to disable the page fault retry interrupts, default is
3024                  * 0x100 (256) */
3025                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3026         }
3027 #endif
3028 }
3029
3030 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3031 {
3032         const struct rlc_firmware_header_v2_0 *hdr;
3033         const __le32 *fw_data;
3034         unsigned i, fw_size;
3035
3036         if (!adev->gfx.rlc_fw)
3037                 return -EINVAL;
3038
3039         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3040         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3041
3042         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3043                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3044         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3045
3046         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3047                         RLCG_UCODE_LOADING_START_ADDRESS);
3048         for (i = 0; i < fw_size; i++)
3049                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3050         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3051
3052         return 0;
3053 }
3054
3055 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3056 {
3057         int r;
3058
3059         if (amdgpu_sriov_vf(adev)) {
3060                 gfx_v9_0_init_csb(adev);
3061                 return 0;
3062         }
3063
3064         adev->gfx.rlc.funcs->stop(adev);
3065
3066         /* disable CG */
3067         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3068
3069         gfx_v9_0_init_pg(adev);
3070
3071         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3072                 /* legacy rlc firmware loading */
3073                 r = gfx_v9_0_rlc_load_microcode(adev);
3074                 if (r)
3075                         return r;
3076         }
3077
3078         switch (adev->asic_type) {
3079         case CHIP_RAVEN:
3080                 if (amdgpu_lbpw == 0)
3081                         gfx_v9_0_enable_lbpw(adev, false);
3082                 else
3083                         gfx_v9_0_enable_lbpw(adev, true);
3084                 break;
3085         case CHIP_VEGA20:
3086                 if (amdgpu_lbpw > 0)
3087                         gfx_v9_0_enable_lbpw(adev, true);
3088                 else
3089                         gfx_v9_0_enable_lbpw(adev, false);
3090                 break;
3091         default:
3092                 break;
3093         }
3094
3095         adev->gfx.rlc.funcs->start(adev);
3096
3097         return 0;
3098 }
3099
3100 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3101 {
3102         int i;
3103         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3104
3105         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3106         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3107         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3108         if (!enable) {
3109                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3110                         adev->gfx.gfx_ring[i].sched.ready = false;
3111         }
3112         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3113         udelay(50);
3114 }
3115
3116 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3117 {
3118         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3119         const struct gfx_firmware_header_v1_0 *ce_hdr;
3120         const struct gfx_firmware_header_v1_0 *me_hdr;
3121         const __le32 *fw_data;
3122         unsigned i, fw_size;
3123
3124         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3125                 return -EINVAL;
3126
3127         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3128                 adev->gfx.pfp_fw->data;
3129         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3130                 adev->gfx.ce_fw->data;
3131         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3132                 adev->gfx.me_fw->data;
3133
3134         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3135         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3136         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3137
3138         gfx_v9_0_cp_gfx_enable(adev, false);
3139
3140         /* PFP */
3141         fw_data = (const __le32 *)
3142                 (adev->gfx.pfp_fw->data +
3143                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3144         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3145         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3146         for (i = 0; i < fw_size; i++)
3147                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3148         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3149
3150         /* CE */
3151         fw_data = (const __le32 *)
3152                 (adev->gfx.ce_fw->data +
3153                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3154         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3155         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3156         for (i = 0; i < fw_size; i++)
3157                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3158         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3159
3160         /* ME */
3161         fw_data = (const __le32 *)
3162                 (adev->gfx.me_fw->data +
3163                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3164         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3165         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3166         for (i = 0; i < fw_size; i++)
3167                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3168         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3169
3170         return 0;
3171 }
3172
3173 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3174 {
3175         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3176         const struct cs_section_def *sect = NULL;
3177         const struct cs_extent_def *ext = NULL;
3178         int r, i, tmp;
3179
3180         /* init the CP */
3181         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3182         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3183
3184         gfx_v9_0_cp_gfx_enable(adev, true);
3185
3186         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3187         if (r) {
3188                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3189                 return r;
3190         }
3191
3192         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3193         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3194
3195         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3196         amdgpu_ring_write(ring, 0x80000000);
3197         amdgpu_ring_write(ring, 0x80000000);
3198
3199         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3200                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3201                         if (sect->id == SECT_CONTEXT) {
3202                                 amdgpu_ring_write(ring,
3203                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3204                                                ext->reg_count));
3205                                 amdgpu_ring_write(ring,
3206                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3207                                 for (i = 0; i < ext->reg_count; i++)
3208                                         amdgpu_ring_write(ring, ext->extent[i]);
3209                         }
3210                 }
3211         }
3212
3213         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3214         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3215
3216         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3217         amdgpu_ring_write(ring, 0);
3218
3219         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3220         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3221         amdgpu_ring_write(ring, 0x8000);
3222         amdgpu_ring_write(ring, 0x8000);
3223
3224         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3225         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3226                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3227         amdgpu_ring_write(ring, tmp);
3228         amdgpu_ring_write(ring, 0);
3229
3230         amdgpu_ring_commit(ring);
3231
3232         return 0;
3233 }
3234
3235 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3236 {
3237         struct amdgpu_ring *ring;
3238         u32 tmp;
3239         u32 rb_bufsz;
3240         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3241
3242         /* Set the write pointer delay */
3243         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3244
3245         /* set the RB to use vmid 0 */
3246         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3247
3248         /* Set ring buffer size */
3249         ring = &adev->gfx.gfx_ring[0];
3250         rb_bufsz = order_base_2(ring->ring_size / 8);
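             /* ring_size is in bytes; the / 8 makes rb_bufsz a log2 value
              * such that the ring holds 2^(rb_bufsz + 1) dwords, matching
              * the 2^(n + 1) convention EOP_SIZE uses for compute queues */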
3251         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3252         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3253 #ifdef __BIG_ENDIAN
3254         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3255 #endif
3256         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3257
3258         /* Initialize the ring buffer's write pointers */
3259         ring->wptr = 0;
3260         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3261         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3262
3263         /* set the wb address whether it's enabled or not */
3264         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3265         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3266         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3267
3268         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3269         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3270         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3271
3272         mdelay(1);
3273         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3274
3275         rb_addr = ring->gpu_addr >> 8;
3276         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3277         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3278
3279         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3280         if (ring->use_doorbell) {
3281                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3282                                     DOORBELL_OFFSET, ring->doorbell_index);
3283                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3284                                     DOORBELL_EN, 1);
3285         } else {
3286                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3287         }
3288         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3289
3290         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3291                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3292         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3293
3294         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3295                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3297
3298         /* start the ring */
3299         gfx_v9_0_cp_gfx_start(adev);
3300         ring->sched.ready = true;
3301
3302         return 0;
3303 }
3304
3305 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3306 {
3307         int i;
3308
3309         if (enable) {
3310                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3311         } else {
3312                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3313                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3314                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3315                         adev->gfx.compute_ring[i].sched.ready = false;
3316                 adev->gfx.kiq.ring.sched.ready = false;
3317         }
3318         udelay(50);
3319 }
3320
3321 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3322 {
3323         const struct gfx_firmware_header_v1_0 *mec_hdr;
3324         const __le32 *fw_data;
3325         unsigned i;
3326         u32 tmp;
3327
3328         if (!adev->gfx.mec_fw)
3329                 return -EINVAL;
3330
3331         gfx_v9_0_cp_compute_enable(adev, false);
3332
3333         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3334         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3335
3336         fw_data = (const __le32 *)
3337                 (adev->gfx.mec_fw->data +
3338                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3339         tmp = 0;
3340         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3341         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3342         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3343
3344         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3345                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3346         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3347                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3348
3349         /* MEC1 */
3350         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3351                          mec_hdr->jt_offset);
3352         for (i = 0; i < mec_hdr->jt_size; i++)
3353                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3354                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3355
3356         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3357                         adev->gfx.mec_fw_version);
3358         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode from MEC1. */
3359
3360         return 0;
3361 }
3362
3363 /* KIQ functions */
3364 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3365 {
3366         uint32_t tmp;
3367         struct amdgpu_device *adev = ring->adev;
3368
3369         /* tell RLC which queue is the KIQ */
3370         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3371         tmp &= 0xffffff00;
3372         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3373         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
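             /*
              * The queue id is written first and bit 7 set only in a
              * second write, presumably so the RLC latches the KIQ's
              * me/pipe/queue before the entry is marked valid.
              */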
3374         tmp |= 0x80;
3375         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3376 }
3377
3378 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3379 {
3380         struct amdgpu_device *adev = ring->adev;
3381
3382         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3383                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
3384                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3385                         ring->has_high_prio = true;
3386                         mqd->cp_hqd_queue_priority =
3387                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3388                 } else {
3389                         ring->has_high_prio = false;
3390                 }
3391         }
3392 }
3393
3394 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3395 {
3396         struct amdgpu_device *adev = ring->adev;
3397         struct v9_mqd *mqd = ring->mqd_ptr;
3398         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3399         uint32_t tmp;
3400
3401         mqd->header = 0xC0310800;
3402         mqd->compute_pipelinestat_enable = 0x00000001;
3403         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3404         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3405         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3406         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3407         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3408         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3409         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3410         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3411         mqd->compute_misc_reserved = 0x00000003;
3412
3413         mqd->dynamic_cu_mask_addr_lo =
3414                 lower_32_bits(ring->mqd_gpu_addr
3415                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3416         mqd->dynamic_cu_mask_addr_hi =
3417                 upper_32_bits(ring->mqd_gpu_addr
3418                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3419
3420         eop_base_addr = ring->eop_gpu_addr >> 8;
3421         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3422         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3423
3424         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3425         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3426         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3427                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
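             /* e.g. GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords, so the
              * field is order_base_2(1024) - 1 = 9 and the EOP buffer is
              * 2^(9 + 1) = 1024 dwords */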
3428
3429         mqd->cp_hqd_eop_control = tmp;
3430
3431         /* enable doorbell? */
3432         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3433
3434         if (ring->use_doorbell) {
3435                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3436                                     DOORBELL_OFFSET, ring->doorbell_index);
3437                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3438                                     DOORBELL_EN, 1);
3439                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3440                                     DOORBELL_SOURCE, 0);
3441                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3442                                     DOORBELL_HIT, 0);
3443         } else {
3444                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3445                                          DOORBELL_EN, 0);
3446         }
3447
3448         mqd->cp_hqd_pq_doorbell_control = tmp;
3449
3450         /* disable the queue if it's active */
3451         ring->wptr = 0;
3452         mqd->cp_hqd_dequeue_request = 0;
3453         mqd->cp_hqd_pq_rptr = 0;
3454         mqd->cp_hqd_pq_wptr_lo = 0;
3455         mqd->cp_hqd_pq_wptr_hi = 0;
3456
3457         /* set the pointer to the MQD */
3458         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3459         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3460
3461         /* set MQD vmid to 0 */
3462         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3463         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3464         mqd->cp_mqd_control = tmp;
3465
3466         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3467         hqd_gpu_addr = ring->gpu_addr >> 8;
3468         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3469         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3470
3471         /* set up the HQD, this is similar to CP_RB0_CNTL */
3472         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3473         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3474                             (order_base_2(ring->ring_size / 4) - 1));
3475         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3476                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3477 #ifdef __BIG_ENDIAN
3478         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3479 #endif
3480         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3481         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3482         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3483         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3484         mqd->cp_hqd_pq_control = tmp;
3485
3486         /* set the wb address whether it's enabled or not */
3487         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3488         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3489         mqd->cp_hqd_pq_rptr_report_addr_hi =
3490                 upper_32_bits(wb_gpu_addr) & 0xffff;
3491
3492         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3493         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3494         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3495         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3496
3497         tmp = 0;
3498         /* enable the doorbell if requested */
3499         if (ring->use_doorbell) {
3500                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3501                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3502                                 DOORBELL_OFFSET, ring->doorbell_index);
3503
3504                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3505                                          DOORBELL_EN, 1);
3506                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3507                                          DOORBELL_SOURCE, 0);
3508                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3509                                          DOORBELL_HIT, 0);
3510         }
3511
3512         mqd->cp_hqd_pq_doorbell_control = tmp;
3513
3514         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3515         ring->wptr = 0;
3516         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3517
3518         /* set the vmid for the queue */
3519         mqd->cp_hqd_vmid = 0;
3520
3521         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3522         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3523         mqd->cp_hqd_persistent_state = tmp;
3524
3525         /* set MIN_IB_AVAIL_SIZE */
3526         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3527         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3528         mqd->cp_hqd_ib_control = tmp;
3529
3530         /* set static priority for a queue/ring */
3531         gfx_v9_0_mqd_set_priority(ring, mqd);
3532         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3533
	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
3537         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3538                 mqd->cp_hqd_active = 1;
3539
3540         return 0;
3541 }
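
/* Illustrative sketch of the EOP_SIZE encoding used in gfx_v9_0_mqd_init()
 * above: the hardware interprets the field as 2^(EOP_SIZE+1) dwords, so a
 * 4096-byte HPD buffer (1024 dwords) needs
 * EOP_SIZE = order_base_2(1024) - 1 = 9.  This helper is hypothetical and
 * exists only to spell the math out; the driver computes it inline.
 */
static inline u32 gfx_v9_0_eop_size_field(u32 eop_bytes)
{
	/* bytes -> dwords, then log2 minus one per the register encoding */
	return order_base_2(eop_bytes / 4) - 1;
}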
3542
3543 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3544 {
3545         struct amdgpu_device *adev = ring->adev;
3546         struct v9_mqd *mqd = ring->mqd_ptr;
3547         int j;
3548
3549         /* disable wptr polling */
3550         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3551
3552         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3553                mqd->cp_hqd_eop_base_addr_lo);
3554         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3555                mqd->cp_hqd_eop_base_addr_hi);
3556
3557         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3558         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3559                mqd->cp_hqd_eop_control);
3560
3561         /* enable doorbell? */
3562         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3563                mqd->cp_hqd_pq_doorbell_control);
3564
3565         /* disable the queue if it's active */
3566         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3567                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3568                 for (j = 0; j < adev->usec_timeout; j++) {
3569                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3570                                 break;
3571                         udelay(1);
3572                 }
3573                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3574                        mqd->cp_hqd_dequeue_request);
3575                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3576                        mqd->cp_hqd_pq_rptr);
3577                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3578                        mqd->cp_hqd_pq_wptr_lo);
3579                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3580                        mqd->cp_hqd_pq_wptr_hi);
3581         }
3582
3583         /* set the pointer to the MQD */
3584         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3585                mqd->cp_mqd_base_addr_lo);
3586         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3587                mqd->cp_mqd_base_addr_hi);
3588
3589         /* set MQD vmid to 0 */
3590         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3591                mqd->cp_mqd_control);
3592
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3594         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3595                mqd->cp_hqd_pq_base_lo);
3596         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3597                mqd->cp_hqd_pq_base_hi);
3598
3599         /* set up the HQD, this is similar to CP_RB0_CNTL */
3600         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3601                mqd->cp_hqd_pq_control);
3602
3603         /* set the wb address whether it's enabled or not */
3604         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3605                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3606         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3607                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3608
3609         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3610         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3611                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3612         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3613                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3614
3615         /* enable the doorbell if requested */
3616         if (ring->use_doorbell) {
3617                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3618                                         (adev->doorbell_index.kiq * 2) << 2);
3619                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3620                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3621         }
3622
3623         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3624                mqd->cp_hqd_pq_doorbell_control);
3625
3626         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3627         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3628                mqd->cp_hqd_pq_wptr_lo);
3629         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3630                mqd->cp_hqd_pq_wptr_hi);
3631
3632         /* set the vmid for the queue */
3633         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3634
3635         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3636                mqd->cp_hqd_persistent_state);
3637
3638         /* activate the queue */
3639         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3640                mqd->cp_hqd_active);
3641
3642         if (ring->use_doorbell)
3643                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3644
3645         return 0;
3646 }
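
/* Minimal sketch of the deactivation poll used above and in
 * gfx_v9_0_kiq_fini_register(): after requesting a dequeue, busy-wait on
 * CP_HQD_ACTIVE with microsecond granularity up to adev->usec_timeout.
 * The helper itself is hypothetical; the driver open-codes this loop.
 */
static inline int gfx_v9_0_wait_hqd_inactive(struct amdgpu_device *adev)
{
	int j;

	for (j = 0; j < adev->usec_timeout; j++) {
		if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}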
3647
3648 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3649 {
3650         struct amdgpu_device *adev = ring->adev;
3651         int j;
3652
3653         /* disable the queue if it's active */
3654         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3655
3656                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3657
3658                 for (j = 0; j < adev->usec_timeout; j++) {
3659                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3660                                 break;
3661                         udelay(1);
3662                 }
3663
		if (j == adev->usec_timeout) {
3665                         DRM_DEBUG("KIQ dequeue request failed.\n");
3666
3667                         /* Manual disable if dequeue request times out */
3668                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3669                 }
3670
3671                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3672                       0);
3673         }
3674
3675         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3676         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3677         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
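	/* Hedged note: assuming the gfx9 layout of CP_HQD_PQ_DOORBELL_CONTROL
	 * with DOORBELL_EN at bit 30, the 0x40000000 write below briefly
	 * re-enables the doorbell before the register is cleared entirely.
	 */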
3678         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3679         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3680         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3681         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3682         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3683
3684         return 0;
3685 }
3686
3687 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3688 {
3689         struct amdgpu_device *adev = ring->adev;
3690         struct v9_mqd *mqd = ring->mqd_ptr;
3691         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3692
3693         gfx_v9_0_kiq_setting(ring);
3694
3695         if (adev->in_gpu_reset) { /* for GPU_RESET case */
3696                 /* reset MQD to a clean status */
3697                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3698                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3699
3700                 /* reset ring buffer */
3701                 ring->wptr = 0;
3702                 amdgpu_ring_clear_ring(ring);
3703
3704                 mutex_lock(&adev->srbm_mutex);
3705                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3706                 gfx_v9_0_kiq_init_register(ring);
3707                 soc15_grbm_select(adev, 0, 0, 0, 0);
3708                 mutex_unlock(&adev->srbm_mutex);
3709         } else {
3710                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3711                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3712                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3713                 mutex_lock(&adev->srbm_mutex);
3714                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3715                 gfx_v9_0_mqd_init(ring);
3716                 gfx_v9_0_kiq_init_register(ring);
3717                 soc15_grbm_select(adev, 0, 0, 0, 0);
3718                 mutex_unlock(&adev->srbm_mutex);
3719
3720                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3721                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3722         }
3723
3724         return 0;
3725 }
3726
3727 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3728 {
3729         struct amdgpu_device *adev = ring->adev;
3730         struct v9_mqd *mqd = ring->mqd_ptr;
3731         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3732
3733         if (!adev->in_gpu_reset && !adev->in_suspend) {
3734                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3735                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3736                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3737                 mutex_lock(&adev->srbm_mutex);
3738                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3739                 gfx_v9_0_mqd_init(ring);
3740                 soc15_grbm_select(adev, 0, 0, 0, 0);
3741                 mutex_unlock(&adev->srbm_mutex);
3742
3743                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3744                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3745         } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3746                 /* reset MQD to a clean status */
3747                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3748                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3749
3750                 /* reset ring buffer */
3751                 ring->wptr = 0;
3752                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3753                 amdgpu_ring_clear_ring(ring);
3754         } else {
3755                 amdgpu_ring_clear_ring(ring);
3756         }
3757
3758         return 0;
3759 }
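
/* Sketch of the MQD backup/restore convention shared by the KIQ and KCQ
 * init paths above: the freshly built MQD is saved on first init, and the
 * saved image is copied back on GPU reset so the queue restarts from a
 * known-good state.  This helper is hypothetical; both callers open-code it.
 */
static void __maybe_unused gfx_v9_0_mqd_backup_restore(struct amdgpu_device *adev,
						       struct v9_mqd *mqd,
						       int mqd_idx, bool restore)
{
	if (!adev->gfx.mec.mqd_backup[mqd_idx])
		return;

	if (restore)
		memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx],
		       sizeof(struct v9_mqd_allocation));
	else
		memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd,
		       sizeof(struct v9_mqd_allocation));
}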
3760
3761 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3762 {
3763         struct amdgpu_ring *ring;
3764         int r;
3765
3766         ring = &adev->gfx.kiq.ring;
3767
3768         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3769         if (unlikely(r != 0))
3770                 return r;
3771
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}
3775
3776         gfx_v9_0_kiq_init_queue(ring);
3777         amdgpu_bo_kunmap(ring->mqd_obj);
3778         ring->mqd_ptr = NULL;
3779         amdgpu_bo_unreserve(ring->mqd_obj);
3780         ring->sched.ready = true;
3781         return 0;
3782 }
3783
3784 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3785 {
3786         struct amdgpu_ring *ring = NULL;
3787         int r = 0, i;
3788
3789         gfx_v9_0_cp_compute_enable(adev, true);
3790
3791         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3792                 ring = &adev->gfx.compute_ring[i];
3793
3794                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3795                 if (unlikely(r != 0))
3796                         goto done;
3797                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3798                 if (!r) {
3799                         r = gfx_v9_0_kcq_init_queue(ring);
3800                         amdgpu_bo_kunmap(ring->mqd_obj);
3801                         ring->mqd_ptr = NULL;
3802                 }
3803                 amdgpu_bo_unreserve(ring->mqd_obj);
3804                 if (r)
3805                         goto done;
3806         }
3807
3808         r = amdgpu_gfx_enable_kcq(adev);
3809 done:
3810         return r;
3811 }
3812
3813 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3814 {
3815         int r, i;
3816         struct amdgpu_ring *ring;
3817
3818         if (!(adev->flags & AMD_IS_APU))
3819                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3820
3821         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3822                 if (adev->asic_type != CHIP_ARCTURUS) {
3823                         /* legacy firmware loading */
3824                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3825                         if (r)
3826                                 return r;
3827                 }
3828
3829                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3830                 if (r)
3831                         return r;
3832         }
3833
3834         r = gfx_v9_0_kiq_resume(adev);
3835         if (r)
3836                 return r;
3837
3838         if (adev->asic_type != CHIP_ARCTURUS) {
3839                 r = gfx_v9_0_cp_gfx_resume(adev);
3840                 if (r)
3841                         return r;
3842         }
3843
3844         r = gfx_v9_0_kcq_resume(adev);
3845         if (r)
3846                 return r;
3847
3848         if (adev->asic_type != CHIP_ARCTURUS) {
3849                 ring = &adev->gfx.gfx_ring[0];
3850                 r = amdgpu_ring_test_helper(ring);
3851                 if (r)
3852                         return r;
3853         }
3854
3855         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3856                 ring = &adev->gfx.compute_ring[i];
3857                 amdgpu_ring_test_helper(ring);
3858         }
3859
3860         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3861
3862         return 0;
3863 }
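
/* Ordering note for gfx_v9_0_cp_resume() above: microcode must be loaded
 * before any queue is touched, the KIQ has to come up before the KCQs
 * (amdgpu_gfx_enable_kcq() maps them through the KIQ), and the ring tests
 * run last, once every ring is live.
 */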
3864
3865 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3866 {
3867         u32 tmp;
3868
3869         if (adev->asic_type != CHIP_ARCTURUS)
3870                 return;
3871
3872         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3873         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3874                                 adev->df.hash_status.hash_64k);
3875         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3876                                 adev->df.hash_status.hash_2m);
3877         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3878                                 adev->df.hash_status.hash_1g);
3879         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3880 }
3881
3882 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3883 {
3884         if (adev->asic_type != CHIP_ARCTURUS)
3885                 gfx_v9_0_cp_gfx_enable(adev, enable);
3886         gfx_v9_0_cp_compute_enable(adev, enable);
3887 }
3888
3889 static int gfx_v9_0_hw_init(void *handle)
3890 {
3891         int r;
3892         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3893
3894         if (!amdgpu_sriov_vf(adev))
3895                 gfx_v9_0_init_golden_registers(adev);
3896
3897         gfx_v9_0_constants_init(adev);
3898
3899         gfx_v9_0_init_tcp_config(adev);
3900
3901         r = adev->gfx.rlc.funcs->resume(adev);
3902         if (r)
3903                 return r;
3904
3905         r = gfx_v9_0_cp_resume(adev);
3906         if (r)
3907                 return r;
3908
3909         return r;
3910 }
3911
3912 static int gfx_v9_0_hw_fini(void *handle)
3913 {
3914         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3915
3916         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3917         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3918         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3919
	/* DF freeze and KCQ disable will fail if a RAS interrupt has fired */
	if (!amdgpu_ras_intr_triggered())
		/* disable KCQ so the CPC stops touching memory that is no longer valid */
		amdgpu_gfx_disable_kcq(adev);
3924
3925         if (amdgpu_sriov_vf(adev)) {
3926                 gfx_v9_0_cp_gfx_enable(adev, false);
		/* Polling must be disabled for SRIOV once the hardware is
		 * finished; otherwise the CPC engine may keep fetching a WB
		 * address that is no longer valid after software teardown,
		 * triggering a DMAR read error on the hypervisor side.
		 */
3932                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3933                 return 0;
3934         }
3935
	/* Use the de-initialization sequence from CAIL when unbinding the
	 * device from the driver; otherwise the KIQ hangs when it is bound
	 * back.
	 */
3939         if (!adev->in_gpu_reset && !adev->in_suspend) {
3940                 mutex_lock(&adev->srbm_mutex);
3941                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3942                                 adev->gfx.kiq.ring.pipe,
3943                                 adev->gfx.kiq.ring.queue, 0);
3944                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3945                 soc15_grbm_select(adev, 0, 0, 0, 0);
3946                 mutex_unlock(&adev->srbm_mutex);
3947         }
3948
3949         gfx_v9_0_cp_enable(adev, false);
3950         adev->gfx.rlc.funcs->stop(adev);
3951
3952         return 0;
3953 }
3954
3955 static int gfx_v9_0_suspend(void *handle)
3956 {
3957         return gfx_v9_0_hw_fini(handle);
3958 }
3959
3960 static int gfx_v9_0_resume(void *handle)
3961 {
3962         return gfx_v9_0_hw_init(handle);
3963 }
3964
3965 static bool gfx_v9_0_is_idle(void *handle)
3966 {
3967         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3968
3969         if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3970                                 GRBM_STATUS, GUI_ACTIVE))
3971                 return false;
3972         else
3973                 return true;
3974 }
3975
3976 static int gfx_v9_0_wait_for_idle(void *handle)
3977 {
3978         unsigned i;
3979         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3980
3981         for (i = 0; i < adev->usec_timeout; i++) {
3982                 if (gfx_v9_0_is_idle(handle))
3983                         return 0;
3984                 udelay(1);
3985         }
3986         return -ETIMEDOUT;
3987 }
3988
3989 static int gfx_v9_0_soft_reset(void *handle)
3990 {
3991         u32 grbm_soft_reset = 0;
3992         u32 tmp;
3993         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3994
3995         /* GRBM_STATUS */
3996         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3997         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3998                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3999                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4000                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4001                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4002                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4003                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4004                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4005                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4006                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4007         }
4008
4009         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4010                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4011                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4012         }
4013
4014         /* GRBM_STATUS2 */
4015         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4016         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4017                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4018                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4019
4021         if (grbm_soft_reset) {
4022                 /* stop the rlc */
4023                 adev->gfx.rlc.funcs->stop(adev);
4024
4025                 if (adev->asic_type != CHIP_ARCTURUS)
4026                         /* Disable GFX parsing/prefetching */
4027                         gfx_v9_0_cp_gfx_enable(adev, false);
4028
4029                 /* Disable MEC parsing/prefetching */
4030                 gfx_v9_0_cp_compute_enable(adev, false);
4031
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4045
4046                 /* Wait a little for things to settle down */
4047                 udelay(50);
4048         }
4049         return 0;
4050 }
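
/* Minimal sketch of the soft-reset pulse performed above, assuming the CP
 * and RLC have already been quiesced: set the reset bits, read back to post
 * the write, wait, then clear the same bits.  Hypothetical helper for
 * illustration only.
 */
static void __maybe_unused gfx_v9_0_grbm_soft_reset_pulse(struct amdgpu_device *adev,
							  u32 reset_mask)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

	tmp |= reset_mask;
	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

	udelay(50);

	tmp &= ~reset_mask;
	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
	(void)RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
}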
4051
4052 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4053 {
4054         signed long r, cnt = 0;
4055         unsigned long flags;
4056         uint32_t seq;
4057         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4058         struct amdgpu_ring *ring = &kiq->ring;
4059
4060         BUG_ON(!ring->funcs->emit_rreg);
4061
4062         spin_lock_irqsave(&kiq->ring_lock, flags);
4063         amdgpu_ring_alloc(ring, 32);
4064         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4065         amdgpu_ring_write(ring, 9 |     /* src: register*/
4066                                 (5 << 8) |      /* dst: memory */
4067                                 (1 << 16) |     /* count sel */
4068                                 (1 << 20));     /* write confirm */
4069         amdgpu_ring_write(ring, 0);
4070         amdgpu_ring_write(ring, 0);
4071         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4072                                 kiq->reg_val_offs * 4));
4073         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4074                                 kiq->reg_val_offs * 4));
4075         amdgpu_fence_emit_polling(ring, &seq);
4076         amdgpu_ring_commit(ring);
4077         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4078
4079         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4080
	/* Don't keep waiting in the GPU-reset case, since doing so can block
	 * the gpu_recover() routine forever: for example, when this KIQ read
	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() never
	 * returns while we keep waiting here, which hangs gpu_recover().
	 *
	 * Likewise, don't keep waiting when called from IRQ context.
	 */
4089         if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
4090                 goto failed_kiq_read;
4091
4092         might_sleep();
4093         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4094                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4095                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4096         }
4097
4098         if (cnt > MAX_KIQ_REG_TRY)
4099                 goto failed_kiq_read;
4100
	return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
		(uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
4103
4104 failed_kiq_read:
4105         pr_err("failed to read gpu clock\n");
4106         return ~0;
4107 }
4108
4109 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4110 {
4111         uint64_t clock;
4112
4113         amdgpu_gfx_off_ctrl(adev, false);
4114         mutex_lock(&adev->gfx.gpu_clock_mutex);
4115         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4116                 clock = gfx_v9_0_kiq_read_clock(adev);
4117         } else {
4118                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4119                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4120                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4121         }
4122         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4123         amdgpu_gfx_off_ctrl(adev, true);
4124         return clock;
4125 }
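
/* In the direct-read path above, writing 1 to RLC_CAPTURE_GPU_CLOCK_COUNT
 * latches the free-running counter, so the following LSB/MSB reads form a
 * coherent 64-bit sample rather than two unrelated halves.
 */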
4126
4127 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4128                                           uint32_t vmid,
4129                                           uint32_t gds_base, uint32_t gds_size,
4130                                           uint32_t gws_base, uint32_t gws_size,
4131                                           uint32_t oa_base, uint32_t oa_size)
4132 {
4133         struct amdgpu_device *adev = ring->adev;
4134
4135         /* GDS Base */
4136         gfx_v9_0_write_data_to_reg(ring, 0, false,
4137                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4138                                    gds_base);
4139
4140         /* GDS Size */
4141         gfx_v9_0_write_data_to_reg(ring, 0, false,
4142                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4143                                    gds_size);
4144
4145         /* GWS */
4146         gfx_v9_0_write_data_to_reg(ring, 0, false,
4147                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4148                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4149
4150         /* OA */
4151         gfx_v9_0_write_data_to_reg(ring, 0, false,
4152                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4153                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
4154 }
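
/* Worked example of the OA mask math above: for oa_base = 2 and oa_size = 3,
 * (1 << (oa_size + oa_base)) - (1 << oa_base) = 0x1c, i.e. a contiguous run
 * of oa_size bits starting at bit oa_base.  Hypothetical helper, shown only
 * to make the bit arithmetic explicit.
 */
static inline u32 gfx_v9_0_oa_mask(u32 oa_base, u32 oa_size)
{
	return (1 << (oa_size + oa_base)) - (1 << oa_base);
}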
4155
4156 static const u32 vgpr_init_compute_shader[] =
4157 {
4158         0xb07c0000, 0xbe8000ff,
4159         0x000000f8, 0xbf110800,
4160         0x7e000280, 0x7e020280,
4161         0x7e040280, 0x7e060280,
4162         0x7e080280, 0x7e0a0280,
4163         0x7e0c0280, 0x7e0e0280,
4164         0x80808800, 0xbe803200,
4165         0xbf84fff5, 0xbf9c0000,
4166         0xd28c0001, 0x0001007f,
4167         0xd28d0001, 0x0002027e,
4168         0x10020288, 0xb8810904,
4169         0xb7814000, 0xd1196a01,
4170         0x00000301, 0xbe800087,
4171         0xbefc00c1, 0xd89c4000,
4172         0x00020201, 0xd89cc080,
4173         0x00040401, 0x320202ff,
4174         0x00000800, 0x80808100,
4175         0xbf84fff8, 0x7e020280,
4176         0xbf810000, 0x00000000,
4177 };
4178
4179 static const u32 sgpr_init_compute_shader[] =
4180 {
4181         0xb07c0000, 0xbe8000ff,
4182         0x0000005f, 0xbee50080,
4183         0xbe812c65, 0xbe822c65,
4184         0xbe832c65, 0xbe842c65,
4185         0xbe852c65, 0xb77c0005,
4186         0x80808500, 0xbf84fff8,
4187         0xbe800080, 0xbf810000,
4188 };
4189
4190 static const u32 vgpr_init_compute_shader_arcturus[] = {
4191         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4192         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4193         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4194         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4195         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4196         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4197         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4198         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4199         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4200         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4201         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4202         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4203         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4204         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4205         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4206         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4207         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4208         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4209         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4210         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4211         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4212         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4213         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4214         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4215         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4216         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4217         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4218         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4219         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4220         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4221         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4222         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4223         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4224         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4225         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4226         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4227         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4228         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4229         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4230         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4231         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4232         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4233         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4234         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4235         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4236         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4237         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4238         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4239         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4240         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4241         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4242         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4243         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4244         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4245         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4246         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4247         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4248         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4249         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4250         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4251         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4252         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4253         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4254         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4255         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4256         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4257         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4258         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4259         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4260         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4261         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4262         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4263         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4264         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4265         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4266         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4267         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4268         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4269         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4270         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4271         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4272         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4273         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4274         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4275         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4276         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4277         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4278         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4279         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4280         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4281         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4282         0xbf84fff8, 0xbf810000,
4283 };
4284
/* When the register arrays below are changed, please update gpr_reg_size
 * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds to cover
 * all gfx9 ASICs.
 */
4288 static const struct soc15_reg_entry vgpr_init_regs[] = {
4289    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4290    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4291    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4292    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4293    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4294    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4295    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4296    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4297    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4298    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4299    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4300    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4301    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4302    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4303 };
4304
4305 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4306    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4307    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4308    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4309    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4310    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4311    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4312    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4313    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4314    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4315    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4316    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4317    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4318    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4319    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4320 };
4321
4322 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4323    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4324    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4325    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4326    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4327    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4328    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4329    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4330    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4331    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4332    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4333    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4334    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4335    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4336    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4337 };
4338
4339 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4340    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4341    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4342    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4343    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4344    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4345    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4346    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4347    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4348    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4349    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4350    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4351    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4352    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4353    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4354 };
4355
4356 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4357    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4358    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4359    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4360    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4361    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4362    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4363    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4364    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4365    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4366    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4367    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4368    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4369    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4370    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4371    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4372    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4373    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4374    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4375    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4376    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4377    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4378    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4379    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4380    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4381    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4382    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4383    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4384    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4385    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4386    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4387    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4388    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4389    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4390 };
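
/* Hedged sketch of how a table like gfx_v9_0_edc_counter_regs is typically
 * consumed: each counter is read once per shader engine and per instance by
 * steering GRBM_GFX_INDEX first.  The walker below is hypothetical and only
 * mirrors the RAS error-count code that uses this table elsewhere in the
 * driver.
 */
static void __maybe_unused gfx_v9_0_walk_edc_counters(struct amdgpu_device *adev)
{
	uint32_t i, j, k;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}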
4391
4392 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4393 {
4394         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4395         int i, r;
4396
	/* only supported when RAS is enabled */
4398         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4399                 return 0;
4400
4401         r = amdgpu_ring_alloc(ring, 7);
4402         if (r) {
4403                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4404                         ring->name, r);
4405                 return r;
4406         }
4407
4408         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4409         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4410
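	/* DMA_DATA with DST_SEL(1) and SRC_SEL(2) means "write inline data to
	 * GDS"; with the zeroed source/destination dwords below and the byte
	 * count in the final dword, this clears the whole GDS aperture.
	 */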
4411         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4412         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4413                                 PACKET3_DMA_DATA_DST_SEL(1) |
4414                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4415                                 PACKET3_DMA_DATA_ENGINE(0)));
4416         amdgpu_ring_write(ring, 0);
4417         amdgpu_ring_write(ring, 0);
4418         amdgpu_ring_write(ring, 0);
4419         amdgpu_ring_write(ring, 0);
4420         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4421                                 adev->gds.gds_size);
4422
4423         amdgpu_ring_commit(ring);
4424
4425         for (i = 0; i < adev->usec_timeout; i++) {
4426                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4427                         break;
4428                 udelay(1);
4429         }
4430
4431         if (i >= adev->usec_timeout)
4432                 r = -ETIMEDOUT;
4433
4434         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4435
4436         return r;
4437 }
4438
4439 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4440 {
4441         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4442         struct amdgpu_ib ib;
4443         struct dma_fence *f = NULL;
4444         int r, i;
4445         unsigned total_size, vgpr_offset, sgpr_offset;
4446         u64 gpu_addr;
4447
4448         int compute_dim_x = adev->gfx.config.max_shader_engines *
4449                                                 adev->gfx.config.max_cu_per_sh *
4450                                                 adev->gfx.config.max_sh_per_se;
4451         int sgpr_work_group_size = 5;
4452         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4453         int vgpr_init_shader_size;
4454         const u32 *vgpr_init_shader_ptr;
4455         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4456
	/* only supported when RAS is enabled */
4458         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4459                 return 0;
4460
4461         /* bail if the compute ring is not ready */
4462         if (!ring->sched.ready)
4463                 return 0;
4464
4465         if (adev->asic_type == CHIP_ARCTURUS) {
4466                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4467                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4468                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4469         } else {
4470                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4471                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4472                 vgpr_init_regs_ptr = vgpr_init_regs;
4473         }
4474
4475         total_size =
4476                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4477         total_size +=
4478                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4479         total_size +=
4480                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4481         total_size = ALIGN(total_size, 256);
4482         vgpr_offset = total_size;
4483         total_size += ALIGN(vgpr_init_shader_size, 256);
4484         sgpr_offset = total_size;
4485         total_size += sizeof(sgpr_init_compute_shader);
4486
4487         /* allocate an indirect buffer to put the commands in */
4488         memset(&ib, 0, sizeof(ib));
4489         r = amdgpu_ib_get(adev, NULL, total_size,
4490                                         AMDGPU_IB_POOL_DIRECT, &ib);
4491         if (r) {
4492                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4493                 return r;
4494         }
4495
4496         /* load the compute shaders */
4497         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4498                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4499
4500         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4501                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4502
4503         /* init the ib length to 0 */
4504         ib.length_dw = 0;
4505
4506         /* VGPR */
4507         /* write the register state for the compute dispatch */
4508         for (i = 0; i < gpr_reg_size; i++) {
4509                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4510                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4511                                                                 - PACKET3_SET_SH_REG_START;
4512                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4513         }
4514         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4515         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4516         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4517         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4518                                                         - PACKET3_SET_SH_REG_START;
4519         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4520         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4521
4522         /* write dispatch packet */
4523         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4524         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4525         ib.ptr[ib.length_dw++] = 1; /* y */
4526         ib.ptr[ib.length_dw++] = 1; /* z */
4527         ib.ptr[ib.length_dw++] =
4528                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4529
4530         /* write CS partial flush packet */
4531         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4532         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4533
4534         /* SGPR1 */
4535         /* write the register state for the compute dispatch */
4536         for (i = 0; i < gpr_reg_size; i++) {
4537                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4538                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4539                                                                 - PACKET3_SET_SH_REG_START;
4540                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4541         }
4542         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4543         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4544         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4545         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4546                                                         - PACKET3_SET_SH_REG_START;
4547         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4548         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4549
4550         /* write dispatch packet */
4551         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4552         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4553         ib.ptr[ib.length_dw++] = 1; /* y */
4554         ib.ptr[ib.length_dw++] = 1; /* z */
4555         ib.ptr[ib.length_dw++] =
4556                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4557
4558         /* write CS partial flush packet */
4559         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4560         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4561
4562         /* SGPR2 */
4563         /* write the register state for the compute dispatch */
4564         for (i = 0; i < gpr_reg_size; i++) {
4565                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4566                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4567                                                                 - PACKET3_SET_SH_REG_START;
4568                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4569         }
4570         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4571         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4572         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4573         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4574                                                         - PACKET3_SET_SH_REG_START;
4575         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4576         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4577
4578         /* write dispatch packet */
4579         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4580         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4581         ib.ptr[ib.length_dw++] = 1; /* y */
4582         ib.ptr[ib.length_dw++] = 1; /* z */
4583         ib.ptr[ib.length_dw++] =
4584                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4585
4586         /* write CS partial flush packet */
4587         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4588         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4589
	/* schedule the ib on the ring */
4591         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4592         if (r) {
4593                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4594                 goto fail;
4595         }
4596
4597         /* wait for the GPU to finish processing the IB */
4598         r = dma_fence_wait(f, false);
4599         if (r) {
4600                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4601                 goto fail;
4602         }
4603
4604 fail:
4605         amdgpu_ib_free(adev, &ib, NULL);
4606         dma_fence_put(f);
4607
4608         return r;
4609 }
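
/* Sketch of the three-dword PACKET3_SET_SH_REG sequence emitted repeatedly
 * above, assuming a single-register write: header, register offset relative
 * to the SH register window, then the value.  Hypothetical helper; the
 * function above open-codes it for each table entry.
 */
static void __maybe_unused gfx_v9_0_emit_set_sh_reg(struct amdgpu_ib *ib,
						    u32 reg, u32 value)
{
	ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ib->ptr[ib->length_dw++] = reg - PACKET3_SET_SH_REG_START;
	ib->ptr[ib->length_dw++] = value;
}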
4610
4611 static int gfx_v9_0_early_init(void *handle)
4612 {
4613         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4614
4615         if (adev->asic_type == CHIP_ARCTURUS)
4616                 adev->gfx.num_gfx_rings = 0;
4617         else
4618                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4619         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4620         gfx_v9_0_set_kiq_pm4_funcs(adev);
4621         gfx_v9_0_set_ring_funcs(adev);
4622         gfx_v9_0_set_irq_funcs(adev);
4623         gfx_v9_0_set_gds_init(adev);
4624         gfx_v9_0_set_rlc_funcs(adev);
4625
4626         return 0;
4627 }
4628
4629 static int gfx_v9_0_ecc_late_init(void *handle)
4630 {
4631         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4632         int r;
4633
	/*
	 * Temporary workaround for an issue where the CP firmware fails to
	 * update the read pointer while CPDMA writes the GDS clearing
	 * operation during the suspend/resume sequence on several cards.
	 * Limit this operation to the cold-boot sequence for now.
	 */
4640         if (!adev->in_suspend) {
4641                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4642                 if (r)
4643                         return r;
4644         }
4645
4646         /* requires IBs so do in late init after IB pool is initialized */
4647         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4648         if (r)
4649                 return r;
4650
4651         if (adev->gfx.funcs &&
4652             adev->gfx.funcs->reset_ras_error_count)
4653                 adev->gfx.funcs->reset_ras_error_count(adev);
4654
4655         r = amdgpu_gfx_ras_late_init(adev);
4656         if (r)
4657                 return r;
4658
4659         return 0;
4660 }
4661
4662 static int gfx_v9_0_late_init(void *handle)
4663 {
4664         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4665         int r;
4666
4667         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4668         if (r)
4669                 return r;
4670
4671         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4672         if (r)
4673                 return r;
4674
4675         r = gfx_v9_0_ecc_late_init(handle);
4676         if (r)
4677                 return r;
4678
4679         return 0;
4680 }
4681
4682 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4683 {
4684         uint32_t rlc_setting;
4685
4686         /* if RLC is not enabled, do nothing */
4687         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4688         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4689                 return false;
4690
4691         return true;
4692 }
4693
4694 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4695 {
4696         uint32_t data;
4697         unsigned i;
4698
4699         data = RLC_SAFE_MODE__CMD_MASK;
4700         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4701         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4702
4703         /* wait for RLC_SAFE_MODE */
4704         for (i = 0; i < adev->usec_timeout; i++) {
4705                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4706                         break;
4707                 udelay(1);
4708         }
4709 }
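
/*
 * Note: the write/poll pair above is a request/ack handshake with the RLC.
 * The driver raises CMD together with a MESSAGE payload in RLC_SAFE_MODE,
 * and the RLC firmware is expected (an assumption based on the polling
 * above, not on documentation) to clear the CMD bit once safe mode has
 * been entered; the loop waits for that ack for up to adev->usec_timeout us.
 */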
4710
4711 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4712 {
4713         uint32_t data;
4714
4715         data = RLC_SAFE_MODE__CMD_MASK;
4716         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4717 }
4718
4719 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4720                                                 bool enable)
4721 {
4722         amdgpu_gfx_rlc_enter_safe_mode(adev);
4723
4724         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4725                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4726                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4727                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4728         } else {
4729                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4730                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4731                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4732         }
4733
4734         amdgpu_gfx_rlc_exit_safe_mode(adev);
4735 }
4736
4737 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4738                                                 bool enable)
4739 {
4740         /* TODO: double check if we need to perform under safe mode */
4741         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4742
4743         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4744                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4745         else
4746                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4747
4748         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4749                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4750         else
4751                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4752
4753         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4754 }
4755
4756 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4757                                                       bool enable)
4758 {
4759         uint32_t data, def;
4760
4761         amdgpu_gfx_rlc_enter_safe_mode(adev);
4762
4763         /* It is disabled by HW by default */
4764         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4765                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4766                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4767
4768                 if (adev->asic_type != CHIP_VEGA12)
4769                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4770
4771                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4772                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4773                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4774
4775                 /* only for Vega10 & Raven1 */
4776                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4777
4778                 if (def != data)
4779                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4780
4781                 /* MGLS is a global flag to control all MGLS in GFX */
4782                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4783                         /* 2 - RLC memory Light sleep */
4784                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4785                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4786                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4787                                 if (def != data)
4788                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4789                         }
4790                         /* 3 - CP memory Light sleep */
4791                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4792                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4793                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4794                                 if (def != data)
4795                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4796                         }
4797                 }
4798         } else {
4799                 /* 1 - MGCG_OVERRIDE */
4800                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4801
4802                 if (adev->asic_type != CHIP_VEGA12)
4803                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4804
4805                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4806                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4807                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4808                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4809
4810                 if (def != data)
4811                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4812
4813                 /* 2 - disable MGLS in RLC */
4814                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4815                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4816                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4817                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4818                 }
4819
4820                 /* 3 - disable MGLS in CP */
4821                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4822                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4823                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4824                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4825                 }
4826         }
4827
4828         amdgpu_gfx_rlc_exit_safe_mode(adev);
4829 }
4830
4831 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4832                                            bool enable)
4833 {
4834         uint32_t data, def;
4835
4836         if (adev->asic_type == CHIP_ARCTURUS)
4837                 return;
4838
4839         amdgpu_gfx_rlc_enter_safe_mode(adev);
4840
4841         /* Enable 3D CGCG/CGLS */
4842         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4843                 /* write cmd to clear cgcg/cgls ov */
4844                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4845                 /* unset CGCG override */
4846                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4847                 /* update CGCG and CGLS override bits */
4848                 if (def != data)
4849                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4850
4851                 /* enable 3Dcgcg FSM(0x0000363f) */
4852                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4853
4854                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4855                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4856                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4857                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4858                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4859                 if (def != data)
4860                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4861
4862                 /* set IDLE_POLL_COUNT(0x00900100) */
4863                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4864                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4865                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4866                 if (def != data)
4867                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4868         } else {
4869                 /* Disable CGCG/CGLS */
4870                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4871                 /* disable cgcg, cgls should be disabled */
4872                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4873                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4874                 /* disable cgcg and cgls in FSM */
4875                 if (def != data)
4876                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4877         }
4878
4879         amdgpu_gfx_rlc_exit_safe_mode(adev);
4880 }
4881
4882 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4883                                                       bool enable)
4884 {
4885         uint32_t def, data;
4886
4887         amdgpu_gfx_rlc_enter_safe_mode(adev);
4888
4889         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4890                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4891                 /* unset CGCG override */
4892                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4893                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4894                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4895                 else
4896                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4897                 /* update CGCG and CGLS override bits */
4898                 if (def != data)
4899                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4900
4901                 /* enable cgcg FSM(0x0000363F) */
4902                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4903
4904                 if (adev->asic_type == CHIP_ARCTURUS)
4905                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4906                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4907                 else
4908                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4909                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4910                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4911                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4912                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4913                 if (def != data)
4914                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4915
4916                 /* set IDLE_POLL_COUNT(0x00900100) */
4917                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4918                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4919                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4920                 if (def != data)
4921                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4922         } else {
4923                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4924                 /* reset CGCG/CGLS bits */
4925                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4926                 /* disable cgcg and cgls in FSM */
4927                 if (def != data)
4928                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4929         }
4930
4931         amdgpu_gfx_rlc_exit_safe_mode(adev);
4932 }
4933
4934 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4935                                             bool enable)
4936 {
4937         if (enable) {
4938                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4939                  * ===  MGCG + MGLS ===
4940                  */
4941                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4942                 /* ===  CGCG /CGLS for GFX 3D Only === */
4943                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4944                 /* ===  CGCG + CGLS === */
4945                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4946         } else {
4947                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4948                  * ===  CGCG + CGLS ===
4949                  */
4950                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4951                 /* ===  CGCG /CGLS for GFX 3D Only === */
4952                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4953                 /* ===  MGCG + MGLS === */
4954                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4955         }
4956         return 0;
4957 }
4958
4959 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4960 {
4961         u32 data;
4962
4963         data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
4964
4965         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4966         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4967
4968         WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
4969 }
4970
4971 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
4972                                         uint32_t offset,
4973                                         struct soc15_reg_rlcg *entries, int arr_size)
4974 {
4975         int i;
4976         uint32_t reg;
4977
4978         if (!entries)
4979                 return false;
4980
4981         for (i = 0; i < arr_size; i++) {
4982                 const struct soc15_reg_rlcg *entry;
4983
4984                 entry = &entries[i];
4985                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
4986                 if (offset == reg)
4987                         return true;
4988         }
4989
4990         return false;
4991 }
4992
4993 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
4994 {
4995         return gfx_v9_0_check_rlcg_range(adev, offset,
4996                                         (void *)rlcg_access_gc_9_0,
4997                                         ARRAY_SIZE(rlcg_access_gc_9_0));
4998 }
4999
5000 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5001         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5002         .set_safe_mode = gfx_v9_0_set_safe_mode,
5003         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5004         .init = gfx_v9_0_rlc_init,
5005         .get_csb_size = gfx_v9_0_get_csb_size,
5006         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5007         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5008         .resume = gfx_v9_0_rlc_resume,
5009         .stop = gfx_v9_0_rlc_stop,
5010         .reset = gfx_v9_0_rlc_reset,
5011         .start = gfx_v9_0_rlc_start,
5012         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5013         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5014         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5015 };
5016
5017 static int gfx_v9_0_set_powergating_state(void *handle,
5018                                           enum amd_powergating_state state)
5019 {
5020         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5021         bool enable = (state == AMD_PG_STATE_GATE);
5022
5023         switch (adev->asic_type) {
5024         case CHIP_RAVEN:
5025         case CHIP_RENOIR:
5026                 if (!enable) {
5027                         amdgpu_gfx_off_ctrl(adev, false);
5028                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
5029                 }
5030                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5031                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5032                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5033                 } else {
5034                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5035                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5036                 }
5037
5038                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5039                         gfx_v9_0_enable_cp_power_gating(adev, true);
5040                 else
5041                         gfx_v9_0_enable_cp_power_gating(adev, false);
5042
5043                 /* update gfx cgpg state */
5044                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5045
5046                 /* update mgcg state */
5047                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5048
5049                 if (enable)
5050                         amdgpu_gfx_off_ctrl(adev, true);
5051                 break;
5052         case CHIP_VEGA12:
5053                 if (!enable) {
5054                         amdgpu_gfx_off_ctrl(adev, false);
5055                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
5056                 } else {
5057                         amdgpu_gfx_off_ctrl(adev, true);
5058                 }
5059                 break;
5060         default:
5061                 break;
5062         }
5063
5064         return 0;
5065 }
5066
5067 static int gfx_v9_0_set_clockgating_state(void *handle,
5068                                           enum amd_clockgating_state state)
5069 {
5070         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5071
5072         if (amdgpu_sriov_vf(adev))
5073                 return 0;
5074
5075         switch (adev->asic_type) {
5076         case CHIP_VEGA10:
5077         case CHIP_VEGA12:
5078         case CHIP_VEGA20:
5079         case CHIP_RAVEN:
5080         case CHIP_ARCTURUS:
5081         case CHIP_RENOIR:
5082                 gfx_v9_0_update_gfx_clock_gating(adev,
5083                                                  state == AMD_CG_STATE_GATE);
5084                 break;
5085         default:
5086                 break;
5087         }
5088         return 0;
5089 }
5090
5091 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5092 {
5093         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5094         int data;
5095
5096         if (amdgpu_sriov_vf(adev))
5097                 *flags = 0;
5098
5099         /* AMD_CG_SUPPORT_GFX_MGCG */
5100         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5101         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5102                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5103
5104         /* AMD_CG_SUPPORT_GFX_CGCG */
5105         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5106         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5107                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5108
5109         /* AMD_CG_SUPPORT_GFX_CGLS */
5110         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5111                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5112
5113         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5114         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5115         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5116                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5117
5118         /* AMD_CG_SUPPORT_GFX_CP_LS */
5119         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5120         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5121                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5122
5123         if (adev->asic_type != CHIP_ARCTURUS) {
5124                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5125                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5126                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5127                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5128
5129                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5130                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5131                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5132         }
5133 }
5134
5135 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5136 {
5137         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
5138 }
5139
5140 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5141 {
5142         struct amdgpu_device *adev = ring->adev;
5143         u64 wptr;
5144
5145         /* XXX check if swapping is necessary on BE */
5146         if (ring->use_doorbell) {
5147                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5148         } else {
5149                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5150                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5151         }
5152
5153         return wptr;
5154 }
5155
5156 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5157 {
5158         struct amdgpu_device *adev = ring->adev;
5159
5160         if (ring->use_doorbell) {
5161                 /* XXX check if swapping is necessary on BE */
5162                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5163                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5164         } else {
5165                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5166                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5167         }
5168 }
5169
5170 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5171 {
5172         struct amdgpu_device *adev = ring->adev;
5173         u32 ref_and_mask, reg_mem_engine;
5174         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5175
5176         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5177                 switch (ring->me) {
5178                 case 1:
5179                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5180                         break;
5181                 case 2:
5182                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5183                         break;
5184                 default:
5185                         return;
5186                 }
5187                 reg_mem_engine = 0;
5188         } else {
5189                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5190                 reg_mem_engine = 1; /* pfp */
5191         }
5192
5193         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5194                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5195                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5196                               ref_and_mask, ref_and_mask, 0x20);
5197 }
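
/*
 * Worked example for the function above (an illustrative sketch, not
 * additional behaviour): for a compute ring on MEC1 (ring->me == 1),
 * pipe 2, ref_and_mask becomes nbio_hf_reg->ref_and_mask_cp2 << 2. The
 * emitted WAIT_REG_MEM then, in effect, writes that mask to the NBIO
 * "flush request" register and polls the "flush done" register until the
 * masked value matches, with a poll interval argument of 0x20.
 */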
5198
5199 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5200                                         struct amdgpu_job *job,
5201                                         struct amdgpu_ib *ib,
5202                                         uint32_t flags)
5203 {
5204         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5205         u32 header, control = 0;
5206
5207         if (ib->flags & AMDGPU_IB_FLAG_CE)
5208                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5209         else
5210                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5211
5212         control |= ib->length_dw | (vmid << 24);
5213
5214         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5215                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5216
5217                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5218                         gfx_v9_0_ring_emit_de_meta(ring);
5219         }
5220
5221         amdgpu_ring_write(ring, header);
5222         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5223         amdgpu_ring_write(ring,
5224 #ifdef __BIG_ENDIAN
5225                 (2 << 0) |
5226 #endif
5227                 lower_32_bits(ib->gpu_addr));
5228         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5229         amdgpu_ring_write(ring, control);
5230 }
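
/*
 * For reference, the DW stream emitted above for a plain (non-CE,
 * non-preempted) gfx IB -- an illustrative sketch derived from the code,
 * not a spec excerpt:
 *
 *   DW0: PACKET3(PACKET3_INDIRECT_BUFFER, 2)
 *   DW1: lower_32_bits(ib->gpu_addr)   (| (2 << 0) swap bits on big endian)
 *   DW2: upper_32_bits(ib->gpu_addr)
 *   DW3: control = ib->length_dw | (vmid << 24)
 */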
5231
5232 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5233                                           struct amdgpu_job *job,
5234                                           struct amdgpu_ib *ib,
5235                                           uint32_t flags)
5236 {
5237         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5238         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5239
5240         /* Currently, there is a high possibility to get wave ID mismatch
5241          * between ME and GDS, leading to a hw deadlock, because ME generates
5242          * different wave IDs than the GDS expects. This situation happens
5243          * randomly when at least 5 compute pipes use GDS ordered append.
5244          * The wave IDs generated by ME are also wrong after suspend/resume.
5245          * Those are probably bugs somewhere else in the kernel driver.
5246          *
5247          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5248          * GDS to 0 for this ring (me/pipe).
5249          */
5250         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5251                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5252                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5253                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5254         }
5255
5256         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5257         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5258         amdgpu_ring_write(ring,
5259 #ifdef __BIG_ENDIAN
5260                                 (2 << 0) |
5261 #endif
5262                                 lower_32_bits(ib->gpu_addr));
5263         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5264         amdgpu_ring_write(ring, control);
5265 }
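
/*
 * When AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID is set, the workaround above
 * prepends a 3-DW packet to the IB emission (sketch derived from the code):
 *
 *   DW0: PACKET3(PACKET3_SET_CONFIG_REG, 1)
 *   DW1: mmGDS_COMPUTE_MAX_WAVE_ID
 *   DW2: adev->gds.gds_compute_max_wave_id
 *
 * i.e. rewriting the register with its default value resets the ME/GDS
 * wave ID counters for this ring's me/pipe.
 */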
5266
5267 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5268                                      u64 seq, unsigned flags)
5269 {
5270         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5271         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5272         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5273
5274         /* RELEASE_MEM - flush caches, send int */
5275         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5276         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5277                                                EOP_TC_NC_ACTION_EN) :
5278                                               (EOP_TCL1_ACTION_EN |
5279                                                EOP_TC_ACTION_EN |
5280                                                EOP_TC_WB_ACTION_EN |
5281                                                EOP_TC_MD_ACTION_EN)) |
5282                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5283                                  EVENT_INDEX(5)));
5284         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5285
5286         /*
5287          * The address must be Qword aligned for a 64bit write, and Dword
5288          * aligned when only the 32bit data low is sent (data high is discarded).
5289          */
5290         if (write64bit)
5291                 BUG_ON(addr & 0x7);
5292         else
5293                 BUG_ON(addr & 0x3);
5294         amdgpu_ring_write(ring, lower_32_bits(addr));
5295         amdgpu_ring_write(ring, upper_32_bits(addr));
5296         amdgpu_ring_write(ring, lower_32_bits(seq));
5297         amdgpu_ring_write(ring, upper_32_bits(seq));
5298         amdgpu_ring_write(ring, 0);
5299 }
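
/*
 * Layout of the 8-DW RELEASE_MEM packet emitted above (illustrative
 * sketch derived from the code):
 *
 *   DW0: PACKET3(PACKET3_RELEASE_MEM, 6)
 *   DW1: cache action bits | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 *        EVENT_INDEX(5)
 *   DW2: DATA_SEL(2 for a 64bit fence, 1 for 32bit) | INT_SEL(2 or 0)
 *   DW3: lower_32_bits(addr)   DW4: upper_32_bits(addr)
 *   DW5: lower_32_bits(seq)    DW6: upper_32_bits(seq)
 *   DW7: 0
 */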
5300
5301 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5302 {
5303         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5304         uint32_t seq = ring->fence_drv.sync_seq;
5305         uint64_t addr = ring->fence_drv.gpu_addr;
5306
5307         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5308                               lower_32_bits(addr), upper_32_bits(addr),
5309                               seq, 0xffffffff, 4);
5310 }
5311
5312 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5313                                         unsigned vmid, uint64_t pd_addr)
5314 {
5315         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5316
5317         /* compute doesn't have PFP */
5318         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5319                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5320                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5321                 amdgpu_ring_write(ring, 0x0);
5322         }
5323 }
5324
5325 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5326 {
5327         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
5328 }
5329
5330 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5331 {
5332         u64 wptr;
5333
5334         /* XXX check if swapping is necessary on BE */
5335         if (ring->use_doorbell)
5336                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5337         else
5338                 BUG();
5339         return wptr;
5340 }
5341
5342 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5343 {
5344         struct amdgpu_device *adev = ring->adev;
5345
5346         /* XXX check if swapping is necessary on BE */
5347         if (ring->use_doorbell) {
5348                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5349                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5350         } else {
5351                 BUG(); /* only DOORBELL method supported on gfx9 now */
5352         }
5353 }
5354
5355 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5356                                          u64 seq, unsigned int flags)
5357 {
5358         struct amdgpu_device *adev = ring->adev;
5359
5360         /* we only allocate 32bit for each seq wb address */
5361         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5362
5363         /* write fence seq to the "addr" */
5364         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5365         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5366                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5367         amdgpu_ring_write(ring, lower_32_bits(addr));
5368         amdgpu_ring_write(ring, upper_32_bits(addr));
5369         amdgpu_ring_write(ring, lower_32_bits(seq));
5370
5371         if (flags & AMDGPU_FENCE_FLAG_INT) {
5372                 /* set register to trigger INT */
5373                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5374                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5375                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5376                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5377                 amdgpu_ring_write(ring, 0);
5378                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5379         }
5380 }
5381
5382 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5383 {
5384         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5385         amdgpu_ring_write(ring, 0);
5386 }
5387
5388 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5389 {
5390         struct v9_ce_ib_state ce_payload = {0};
5391         uint64_t csa_addr;
5392         int cnt;
5393
5394         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5395         csa_addr = amdgpu_csa_vaddr(ring->adev);
5396
5397         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5398         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5399                                  WRITE_DATA_DST_SEL(8) |
5400                                  WR_CONFIRM) |
5401                                  WRITE_DATA_CACHE_POLICY(0));
5402         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5403         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5404         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5405 }
5406
5407 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5408 {
5409         struct v9_de_ib_state de_payload = {0};
5410         uint64_t csa_addr, gds_addr;
5411         int cnt;
5412
5413         csa_addr = amdgpu_csa_vaddr(ring->adev);
5414         gds_addr = csa_addr + 4096;
5415         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5416         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5417
5418         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5419         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5420         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5421                                  WRITE_DATA_DST_SEL(8) |
5422                                  WR_CONFIRM) |
5423                                  WRITE_DATA_CACHE_POLICY(0));
5424         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5425         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5426         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5427 }
5428
5429 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
5430 {
5431         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5432         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0 = frame_begin, 1 = frame_end */
5433 }
5434
5435 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5436 {
5437         uint32_t dw2 = 0;
5438
5439         if (amdgpu_sriov_vf(ring->adev))
5440                 gfx_v9_0_ring_emit_ce_meta(ring);
5441
5442         gfx_v9_0_ring_emit_tmz(ring, true);
5443
5444         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5445         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5446                 /* set load_global_config & load_global_uconfig */
5447                 dw2 |= 0x8001;
5448                 /* set load_cs_sh_regs */
5449                 dw2 |= 0x01000000;
5450                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5451                 dw2 |= 0x10002;
5452
5453                 /* set load_ce_ram if a preamble is present */
5454                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5455                         dw2 |= 0x10000000;
5456         } else {
5457                 /* still load_ce_ram if this is the first time a preamble is
5458                  * presented, even though no context switch happens.
5459                  */
5460                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5461                         dw2 |= 0x10000000;
5462         }
5463
5464         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5465         amdgpu_ring_write(ring, dw2);
5466         amdgpu_ring_write(ring, 0);
5467 }
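
/*
 * Worked example for the function above (sketch): with both
 * AMDGPU_HAVE_CTX_SWITCH and AMDGPU_PREAMBLE_IB_PRESENT set in flags,
 * dw2 = 0x80000000 | 0x8001 | 0x01000000 | 0x10002 | 0x10000000
 *     = 0x91018003.
 */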
5468
5469 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5470 {
5471         unsigned ret;
5472         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5473         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5474         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5475         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
5476         ret = ring->wptr & ring->buf_mask;
5477         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5478         return ret;
5479 }
5480
5481 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5482 {
5483         unsigned cur;
5484         BUG_ON(offset > ring->buf_mask);
5485         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5486
5487         cur = (ring->wptr & ring->buf_mask) - 1;
5488         if (likely(cur > offset))
5489                 ring->ring[offset] = cur - offset;
5490         else
5491                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5492 }
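
/*
 * Worked example of the wrap-around case above (sketch): assume a 4 KB
 * ring, i.e. ring_size >> 2 == 1024 DWs and buf_mask == 1023. If the
 * COND_EXEC patch slot sits at offset == 1020 and the write pointer has
 * wrapped around so that cur == 2, the patched skip count is
 * 1024 - 1020 + 2 == 6 DWs.
 */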
5493
5494 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
5495 {
5496         struct amdgpu_device *adev = ring->adev;
5497         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
5498
5499         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5500         amdgpu_ring_write(ring, 0 |     /* src: register*/
5501                                 (5 << 8) |      /* dst: memory */
5502                                 (1 << 20));     /* write confirm */
5503         amdgpu_ring_write(ring, reg);
5504         amdgpu_ring_write(ring, 0);
5505         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5506                                 kiq->reg_val_offs * 4));
5507         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5508                                 kiq->reg_val_offs * 4));
5509 }
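
/*
 * Summary of the COPY_DATA packet above (illustrative, no additional
 * behaviour): the CP copies one DW from a register (src sel 0) to memory
 * (dst sel 5) with write confirm, landing at
 * adev->wb.gpu_addr + kiq->reg_val_offs * 4. A caller such as
 * amdgpu_kiq_rreg() (assumed here) then reads the value back from that
 * writeback slot.
 */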
5510
5511 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5512                                     uint32_t val)
5513 {
5514         uint32_t cmd = 0;
5515
5516         switch (ring->funcs->type) {
5517         case AMDGPU_RING_TYPE_GFX:
5518                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5519                 break;
5520         case AMDGPU_RING_TYPE_KIQ:
5521                 cmd = (1 << 16); /* no inc addr */
5522                 break;
5523         default:
5524                 cmd = WR_CONFIRM;
5525                 break;
5526         }
5527         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5528         amdgpu_ring_write(ring, cmd);
5529         amdgpu_ring_write(ring, reg);
5530         amdgpu_ring_write(ring, 0);
5531         amdgpu_ring_write(ring, val);
5532 }
5533
5534 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5535                                         uint32_t val, uint32_t mask)
5536 {
5537         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5538 }
5539
5540 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5541                                                   uint32_t reg0, uint32_t reg1,
5542                                                   uint32_t ref, uint32_t mask)
5543 {
5544         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5545         struct amdgpu_device *adev = ring->adev;
5546         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5547                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5548
5549         if (fw_version_ok)
5550                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5551                                       ref, mask, 0x20);
5552         else
5553                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5554                                                            ref, mask);
5555 }
5556
5557 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5558 {
5559         struct amdgpu_device *adev = ring->adev;
5560         uint32_t value = 0;
5561
5562         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5563         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5564         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5565         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5566         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5567 }
5568
5569 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5570                                                  enum amdgpu_interrupt_state state)
5571 {
5572         switch (state) {
5573         case AMDGPU_IRQ_STATE_DISABLE:
5574         case AMDGPU_IRQ_STATE_ENABLE:
5575                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5576                                TIME_STAMP_INT_ENABLE,
5577                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5578                 break;
5579         default:
5580                 break;
5581         }
5582 }
5583
5584 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5585                                                      int me, int pipe,
5586                                                      enum amdgpu_interrupt_state state)
5587 {
5588         u32 mec_int_cntl, mec_int_cntl_reg;
5589
5590         /*
5591          * amdgpu controls only the first MEC. That's why this function only
5592          * handles the setting of interrupts for this specific MEC. All other
5593          * pipes' interrupts are set by amdkfd.
5594          */
5595
5596         if (me == 1) {
5597                 switch (pipe) {
5598                 case 0:
5599                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5600                         break;
5601                 case 1:
5602                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5603                         break;
5604                 case 2:
5605                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5606                         break;
5607                 case 3:
5608                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5609                         break;
5610                 default:
5611                         DRM_DEBUG("invalid pipe %d\n", pipe);
5612                         return;
5613                 }
5614         } else {
5615                 DRM_DEBUG("invalid me %d\n", me);
5616                 return;
5617         }
5618
5619         switch (state) {
5620         case AMDGPU_IRQ_STATE_DISABLE:
5621                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5622                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5623                                              TIME_STAMP_INT_ENABLE, 0);
5624                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5625                 break;
5626         case AMDGPU_IRQ_STATE_ENABLE:
5627                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5628                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5629                                              TIME_STAMP_INT_ENABLE, 1);
5630                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5631                 break;
5632         default:
5633                 break;
5634         }
5635 }
5636
5637 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5638                                              struct amdgpu_irq_src *source,
5639                                              unsigned type,
5640                                              enum amdgpu_interrupt_state state)
5641 {
5642         switch (state) {
5643         case AMDGPU_IRQ_STATE_DISABLE:
5644         case AMDGPU_IRQ_STATE_ENABLE:
5645                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5646                                PRIV_REG_INT_ENABLE,
5647                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5648                 break;
5649         default:
5650                 break;
5651         }
5652
5653         return 0;
5654 }
5655
5656 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5657                                               struct amdgpu_irq_src *source,
5658                                               unsigned type,
5659                                               enum amdgpu_interrupt_state state)
5660 {
5661         switch (state) {
5662         case AMDGPU_IRQ_STATE_DISABLE:
5663         case AMDGPU_IRQ_STATE_ENABLE:
5664                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5665                                PRIV_INSTR_INT_ENABLE,
5666                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
5667         default:
5668                 break;
5669         }
5670
5671         return 0;
5672 }
5673
5674 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5675         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5676                         CP_ECC_ERROR_INT_ENABLE, 1)
5677
5678 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5679         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5680                         CP_ECC_ERROR_INT_ENABLE, 0)
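
/*
 * Illustrative expansion (sketch): ENABLE_ECC_ON_ME_PIPE(1, 0) pastes its
 * arguments into the register name and becomes
 * WREG32_FIELD15(GC, 0, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, 1).
 */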
5681
5682 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5683                                               struct amdgpu_irq_src *source,
5684                                               unsigned type,
5685                                               enum amdgpu_interrupt_state state)
5686 {
5687         switch (state) {
5688         case AMDGPU_IRQ_STATE_DISABLE:
5689                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5690                                 CP_ECC_ERROR_INT_ENABLE, 0);
5691                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5692                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5693                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5694                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5695                 break;
5696
5697         case AMDGPU_IRQ_STATE_ENABLE:
5698                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5699                                 CP_ECC_ERROR_INT_ENABLE, 1);
5700                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5701                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5702                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5703                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5704                 break;
5705         default:
5706                 break;
5707         }
5708
5709         return 0;
5710 }
5711
5712
5713 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5714                                             struct amdgpu_irq_src *src,
5715                                             unsigned type,
5716                                             enum amdgpu_interrupt_state state)
5717 {
5718         switch (type) {
5719         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5720                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5721                 break;
5722         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5723                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5724                 break;
5725         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5726                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5727                 break;
5728         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5729                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5730                 break;
5731         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5732                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5733                 break;
5734         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5735                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5736                 break;
5737         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5738                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5739                 break;
5740         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5741                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5742                 break;
5743         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5744                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5745                 break;
5746         default:
5747                 break;
5748         }
5749         return 0;
5750 }
5751
5752 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5753                             struct amdgpu_irq_src *source,
5754                             struct amdgpu_iv_entry *entry)
5755 {
5756         int i;
5757         u8 me_id, pipe_id, queue_id;
5758         struct amdgpu_ring *ring;
5759
5760         DRM_DEBUG("IH: CP EOP\n");
5761         me_id = (entry->ring_id & 0x0c) >> 2;
5762         pipe_id = (entry->ring_id & 0x03) >> 0;
5763         queue_id = (entry->ring_id & 0x70) >> 4;
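        /* Worked decode example (sketch): entry->ring_id == 0x25 yields
         * me_id = (0x25 & 0x0c) >> 2 = 1, pipe_id = 0x25 & 0x03 = 1,
         * queue_id = (0x25 & 0x70) >> 4 = 2, i.e. MEC1, pipe 1, queue 2.
         */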
5764
5765         switch (me_id) {
5766         case 0:
5767                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5768                 break;
5769         case 1:
5770         case 2:
5771                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5772                         ring = &adev->gfx.compute_ring[i];
5773                         /* Per-queue interrupt is supported for MEC starting from VI.
5774                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
5775                          */
5776                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5777                                 amdgpu_fence_process(ring);
5778                 }
5779                 break;
5780         }
5781         return 0;
5782 }
5783
5784 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5785                            struct amdgpu_iv_entry *entry)
5786 {
5787         u8 me_id, pipe_id, queue_id;
5788         struct amdgpu_ring *ring;
5789         int i;
5790
5791         me_id = (entry->ring_id & 0x0c) >> 2;
5792         pipe_id = (entry->ring_id & 0x03) >> 0;
5793         queue_id = (entry->ring_id & 0x70) >> 4;
5794
5795         switch (me_id) {
5796         case 0:
5797                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5798                 break;
5799         case 1:
5800         case 2:
5801                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5802                         ring = &adev->gfx.compute_ring[i];
5803                         if (ring->me == me_id && ring->pipe == pipe_id &&
5804                             ring->queue == queue_id)
5805                                 drm_sched_fault(&ring->sched);
5806                 }
5807                 break;
5808         }
5809 }
5810
5811 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5812                                  struct amdgpu_irq_src *source,
5813                                  struct amdgpu_iv_entry *entry)
5814 {
5815         DRM_ERROR("Illegal register access in command stream\n");
5816         gfx_v9_0_fault(adev, entry);
5817         return 0;
5818 }
5819
5820 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5821                                   struct amdgpu_irq_src *source,
5822                                   struct amdgpu_iv_entry *entry)
5823 {
5824         DRM_ERROR("Illegal instruction in command stream\n");
5825         gfx_v9_0_fault(adev, entry);
5826         return 0;
5827 }
5828
5829
5830 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5831         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5832           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5833           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5834         },
5835         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5836           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5837           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5838         },
5839         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5840           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5841           0, 0
5842         },
5843         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5844           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5845           0, 0
5846         },
5847         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5848           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5849           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5850         },
5851         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5852           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5853           0, 0
5854         },
5855         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5856           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5857           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5858         },
5859         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5860           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5861           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5862         },
5863         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5864           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5865           0, 0
5866         },
5867         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5868           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5869           0, 0
5870         },
5871         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5872           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5873           0, 0
5874         },
5875         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5876           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5877           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5878         },
5879         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5880           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5881           0, 0
5882         },
5883         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5884           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5885           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5886         },
5887         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5888           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5889           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5890           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5891         },
5892         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5893           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5894           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5895           0, 0
5896         },
5897         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5898           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5899           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5900           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5901         },
5902         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5903           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5904           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5905           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5906         },
5907         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5908           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5909           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5910           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5911         },
5912         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5913           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5914           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5915           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5916         },
5917         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5918           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5919           0, 0
5920         },
5921         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5922           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5923           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5924         },
5925         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5926           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5927           0, 0
5928         },
5929         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5930           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5931           0, 0
5932         },
5933         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5934           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5935           0, 0
5936         },
5937         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5938           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5939           0, 0
5940         },
5941         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5942           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5943           0, 0
5944         },
5945         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5946           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5947           0, 0
5948         },
5949         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5950           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5951           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5952         },
5953         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5954           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5955           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5956         },
5957         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5958           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5959           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5960         },
5961         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5962           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5963           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5964         },
5965         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5966           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5967           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5968         },
5969         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5970           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
5971           0, 0
5972         },
5973         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5974           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
5975           0, 0
5976         },
5977         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5978           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
5979           0, 0
5980         },
5981         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5982           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
5983           0, 0
5984         },
5985         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5986           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
5987           0, 0
5988         },
5989         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5990           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
5991           0, 0
5992         },
5993         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5994           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
5995           0, 0
5996         },
5997         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5998           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
5999           0, 0
6000         },
6001         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6002           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6003           0, 0
6004         },
6005         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6006           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6007           0, 0
6008         },
6009         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6010           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6011           0, 0
6012         },
6013         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6014           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6015           0, 0
6016         },
6017         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6018           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6019           0, 0
6020         },
6021         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6022           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6023           0, 0
6024         },
6025         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6026           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6027           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6028         },
6029         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6030           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6031           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6032         },
6033         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6034           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6035           0, 0
6036         },
6037         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6038           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6039           0, 0
6040         },
6041         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6042           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6043           0, 0
6044         },
6045         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6046           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6047           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6048         },
6049         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6050           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6051           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6052         },
6053         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6054           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6055           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6056         },
6057         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6058           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6059           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6060         },
6061         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6062           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6063           0, 0
6064         },
6065         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6066           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6067           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6068         },
6069         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6070           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6071           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6072         },
6073         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6074           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6075           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6076         },
6077         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6078           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6079           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6080         },
6081         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6082           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6083           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6084         },
6085         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6086           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6087           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6088         },
6089         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6090           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6091           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6092         },
6093         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6094           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6095           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6096         },
6097         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6098           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6099           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6100         },
6101         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6102           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6103           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6104         },
6105         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6106           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6107           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6108         },
6109         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6110           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6111           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6112         },
6113         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6114           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6115           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6116         },
6117         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6118           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6119           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6120         },
6121         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6122           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6123           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6124         },
6125         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6126           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6127           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6128         },
6129         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6130           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6131           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6132         },
6133         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6134           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6135           0, 0
6136         },
6137         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6138           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6139           0, 0
6140         },
6141         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6142           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6143           0, 0
6144         },
6145         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6146           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6147           0, 0
6148         },
6149         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6150           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6151           0, 0
6152         },
6153         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6154           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6155           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6156         },
6157         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6158           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6159           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6160         },
6161         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6162           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6163           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6164         },
6165         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6166           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6167           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6168         },
6169         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6170           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6171           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6172         },
6173         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6174           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6175           0, 0
6176         },
6177         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6178           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6179           0, 0
6180         },
6181         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6182           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6183           0, 0
6184         },
6185         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6186           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6187           0, 0
6188         },
6189         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6190           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6191           0, 0
6192         },
6193         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6194           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6195           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6196         },
6197         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6198           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6199           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6200         },
6201         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6202           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6203           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6204         },
6205         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6206           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6207           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6208         },
6209         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6210           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6211           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6212         },
6213         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6214           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6215           0, 0
6216         },
6217         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6218           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6219           0, 0
6220         },
6221         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6222           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6223           0, 0
6224         },
6225         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6226           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6227           0, 0
6228         },
6229         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6230           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6231           0, 0
6232         },
6233         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6234           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6235           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6236         },
6237         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6238           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6239           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6240         },
6241         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6242           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6243           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6244         },
6245         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6246           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6247           0, 0
6248         },
6249         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6250           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6251           0, 0
6252         },
6253         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6254           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6255           0, 0
6256         },
6257         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6258           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6259           0, 0
6260         },
6261         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6262           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6263           0, 0
6264         },
6265         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6266           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6267           0, 0
6268         }
6269 };
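
/*
 * Illustrative sketch (not part of the driver): each entry above pairs an
 * EDC counter register with the SEC (single-error corrected) and DED
 * (double-error detected) mask/shift of one memory instance; sub-blocks
 * whose hardware only reports SED (single-error detected) leave the DED
 * pair as 0, 0.  Decoding a raw counter is a plain mask-and-shift, which
 * is all gfx_v9_0_ras_error_count() below does with these fields.  The
 * helper name here is hypothetical.
 */
static inline uint32_t gfx_v9_0_edc_field(uint32_t value, uint32_t mask,
                                          uint32_t shift)
{
        return (value & mask) >> shift;
}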
6270
6271 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6272                                      void *inject_if)
6273 {
6274         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6275         int ret;
6276         struct ta_ras_trigger_error_input block_info = { 0 };
6277
6278         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6279                 return -EINVAL;
6280
6281         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6282                 return -EINVAL;
6283
6284         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6285                 return -EPERM;
6286
6287         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6288               info->head.type)) {
6289                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6290                         ras_gfx_subblocks[info->head.sub_block_index].name,
6291                         info->head.type);
6292                 return -EPERM;
6293         }
6294
6295         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6296               info->head.type)) {
6297                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6298                         ras_gfx_subblocks[info->head.sub_block_index].name,
6299                         info->head.type);
6300                 return -EPERM;
6301         }
6302
6303         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6304         block_info.sub_block_index =
6305                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6306         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6307         block_info.address = info->address;
6308         block_info.value = info->value;
6309
6310         mutex_lock(&adev->grbm_idx_mutex);
6311         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6312         mutex_unlock(&adev->grbm_idx_mutex);
6313
6314         return ret;
6315 }
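
/*
 * Hedged usage sketch (hypothetical, not driver code): a caller such as the
 * RAS debugfs interface fills a struct ras_inject_if before invoking this
 * hook.  sub_block_index selects a row of ras_gfx_subblocks[], and the
 * requested error type must be both hardware- and driver-supported for that
 * sub-block, or the checks above fail with -EPERM.
 */
static int __maybe_unused gfx_v9_0_ras_inject_example(struct amdgpu_device *adev)
{
        struct ras_inject_if inject = {
                .head = {
                        .block = AMDGPU_RAS_BLOCK__GFX,
                        .type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
                        .sub_block_index = 0, /* row in ras_gfx_subblocks[] */
                },
                .address = 0,
                .value = 0,
        };

        return gfx_v9_0_ras_error_inject(adev, &inject);
}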
6316
6317 static const char *vml2_mems[] = {
6318         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6319         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6320         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6321         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6322         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6323         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6324         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6325         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6326         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6327         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6328         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6329         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6330         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6331         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6332         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6333         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6334 };
6335
6336 static const char *vml2_walker_mems[] = {
6337         "UTC_VML2_CACHE_PDE0_MEM0",
6338         "UTC_VML2_CACHE_PDE0_MEM1",
6339         "UTC_VML2_CACHE_PDE1_MEM0",
6340         "UTC_VML2_CACHE_PDE1_MEM1",
6341         "UTC_VML2_CACHE_PDE2_MEM0",
6342         "UTC_VML2_CACHE_PDE2_MEM1",
6343         "UTC_VML2_RDIF_LOG_FIFO",
6344 };
6345
6346 static const char *atc_l2_cache_2m_mems[] = {
6347         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6348         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6349         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6350         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6351 };
6352
6353 static const char *atc_l2_cache_4k_mems[] = {
6354         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6355         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6356         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6357         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6358         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6359         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6360         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6361         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6362         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6363         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6364         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6365         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6366         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6367         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6368         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6369         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6370         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6371         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6372         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6373         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6374         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6375         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6376         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6377         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6378         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6379         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6380         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6381         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6382         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6383         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6384         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6385         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6386 };
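
/*
 * Note on the access pattern below (editorial): each *_EDC_INDEX /
 * *_ECC_INDEX register is a mux selecting one of the named memory
 * instances listed above, and the matching *_CNT register then reports
 * that instance's error counts.  Index 255 lies outside every valid
 * range, so writing it deselects the instances, and writing 0 to the
 * CNT register clears stale counts before the per-instance scan.
 */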
6387
6388 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6389                                          struct ras_err_data *err_data)
6390 {
6391         uint32_t i, data;
6392         uint32_t sec_count, ded_count;
6393
6394         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6395         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6396         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6397         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6398         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6399         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6400         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6401         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6402
6403         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6404                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6405                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6406
6407                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6408                 if (sec_count) {
6409                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6410                                  vml2_mems[i], sec_count);
6411                         err_data->ce_count += sec_count;
6412                 }
6413
6414                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6415                 if (ded_count) {
6416                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6417                                  vml2_mems[i], ded_count);
6418                         err_data->ue_count += ded_count;
6419                 }
6420         }
6421
6422         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6423                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6424                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6425
6426                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6427                                                 SEC_COUNT);
6428                 if (sec_count) {
6429                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6430                                  vml2_walker_mems[i], sec_count);
6431                         err_data->ce_count += sec_count;
6432                 }
6433
6434                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6435                                                 DED_COUNT);
6436                 if (ded_count) {
6437                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6438                                  vml2_walker_mems[i], ded_count);
6439                         err_data->ue_count += ded_count;
6440                 }
6441         }
6442
6443         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6444                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6445                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6446
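                /*
                 * Open-coded field extraction: per the mask, the SEC count
                 * sits in bits 14:13 of ATC_L2_CACHE_2M_EDC_CNT.
                 */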
6447                 sec_count = (data & 0x00006000L) >> 0xd;
6448                 if (sec_count) {
6449                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6450                                  atc_l2_cache_2m_mems[i], sec_count);
6451                         err_data->ce_count += sec_count;
6452                 }
6453         }
6454
6455         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6456                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6457                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6458
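                /*
                 * As above, open-coded fields: SEC in bits 14:13 and DED
                 * in bits 16:15 of ATC_L2_CACHE_4K_EDC_CNT.
                 */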
6459                 sec_count = (data & 0x00006000L) >> 0xd;
6460                 if (sec_count) {
6461                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6462                                  atc_l2_cache_4k_mems[i], sec_count);
6463                         err_data->ce_count += sec_count;
6464                 }
6465
6466                 ded_count = (data & 0x00018000L) >> 0xf;
6467                 if (ded_count) {
6468                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6469                                  atc_l2_cache_4k_mems[i], ded_count);
6470                         err_data->ue_count += ded_count;
6471                 }
6472         }
6473
6474         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6475         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6476         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6477         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6478
6479         return 0;
6480 }
6481
6482 static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
6483         uint32_t se_id, uint32_t inst_id, uint32_t value,
6484         uint32_t *sec_count, uint32_t *ded_count)
6485 {
6486         uint32_t i;
6487         uint32_t sec_cnt, ded_cnt;
6488
6489         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6490                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6491                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6492                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6493                         continue;
6494
6495                 sec_cnt = (value &
6496                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6497                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6498                 if (sec_cnt) {
6499                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
6500                                 gfx_v9_0_ras_fields[i].name,
6501                                 se_id, inst_id,
6502                                 sec_cnt);
6503                         *sec_count += sec_cnt;
6504                 }
6505
6506                 ded_cnt = (value &
6507                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6508                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6509                 if (ded_cnt) {
6510                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
6511                                 gfx_v9_0_ras_fields[i].name,
6512                                 se_id, inst_id,
6513                                 ded_cnt);
6514                         *ded_count += ded_cnt;
6515                 }
6516         }
6517
6518         return 0;
6519 }
6520
6521 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6522 {
6523         int i, j, k;
6524
6525         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6526                 return;
6527
6528         /* read back registers to clear the counters */
6529         mutex_lock(&adev->grbm_idx_mutex);
6530         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6531                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6532                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6533                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6534                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6535                         }
6536                 }
6537         }
6538         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6539         mutex_unlock(&adev->grbm_idx_mutex);
6540
6541         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6542         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6543         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6544         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6545         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6546         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6547         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6548         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6549
6550         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6551                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6552                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6553         }
6554
6555         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6556                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6557                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6558         }
6559
6560         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6561                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6562                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6563         }
6564
6565         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6566                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6567                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6568         }
6569
6570         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6571         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6572         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6573         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6574 }
6575
6576 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6577                                           void *ras_error_status)
6578 {
6579         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6580         uint32_t sec_count = 0, ded_count = 0;
6581         uint32_t i, j, k;
6582         uint32_t reg_value;
6583
6584         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6585                 return -EINVAL;
6586
6587         err_data->ue_count = 0;
6588         err_data->ce_count = 0;
6589
6590         mutex_lock(&adev->grbm_idx_mutex);
6591
6592         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6593                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6594                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6595                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6596                                 reg_value =
6597                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6598                                 if (reg_value)
6599                                         gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
6600                                                         j, k, reg_value,
6601                                                         &sec_count, &ded_count);
6602                         }
6603                 }
6604         }
6605
6606         err_data->ce_count += sec_count;
6607         err_data->ue_count += ded_count;
6608
6609         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6610         mutex_unlock(&adev->grbm_idx_mutex);
6611
6612         gfx_v9_0_query_utc_edc_status(adev, err_data);
6613
6614         return 0;
6615 }
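
/*
 * Hedged usage sketch (hypothetical, not driver code): the RAS core calls
 * this hook with a zeroed ras_err_data and reads back the accumulated
 * correctable (SEC/SED) and uncorrectable (DED) totals.
 */
static void __maybe_unused gfx_v9_0_ras_count_example(struct amdgpu_device *adev)
{
        struct ras_err_data err_data = { 0 };

        if (!gfx_v9_0_query_ras_error_count(adev, &err_data))
                DRM_INFO("GFX RAS: %lu correctable, %lu uncorrectable\n",
                         err_data.ce_count, err_data.ue_count);
}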
6616
6617 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6618         .name = "gfx_v9_0",
6619         .early_init = gfx_v9_0_early_init,
6620         .late_init = gfx_v9_0_late_init,
6621         .sw_init = gfx_v9_0_sw_init,
6622         .sw_fini = gfx_v9_0_sw_fini,
6623         .hw_init = gfx_v9_0_hw_init,
6624         .hw_fini = gfx_v9_0_hw_fini,
6625         .suspend = gfx_v9_0_suspend,
6626         .resume = gfx_v9_0_resume,
6627         .is_idle = gfx_v9_0_is_idle,
6628         .wait_for_idle = gfx_v9_0_wait_for_idle,
6629         .soft_reset = gfx_v9_0_soft_reset,
6630         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6631         .set_powergating_state = gfx_v9_0_set_powergating_state,
6632         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6633 };
6634
6635 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6636         .type = AMDGPU_RING_TYPE_GFX,
6637         .align_mask = 0xff,
6638         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6639         .support_64bit_ptrs = true,
6640         .vmhub = AMDGPU_GFXHUB_0,
6641         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6642         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6643         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6644         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
6645                 5 +  /* COND_EXEC */
6646                 7 +  /* PIPELINE_SYNC */
6647                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6648                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6649                 2 + /* VM_FLUSH */
6650                 8 +  /* FENCE for VM_FLUSH */
6651                 20 + /* GDS switch */
6652                 4 + /* double SWITCH_BUFFER,
6653                        the first COND_EXEC jumps to the place just
6654                        prior to this double SWITCH_BUFFER */
6655                 5 + /* COND_EXEC */
6656                 7 +      /*     HDP_flush */
6657                 4 +      /*     VGT_flush */
6658                 14 + /* CE_META */
6659                 31 + /* DE_META */
6660                 3 + /* CNTX_CTRL */
6661                 5 + /* HDP_INVL */
6662                 8 + 8 + /* FENCE x2 */
6663                 2, /* SWITCH_BUFFER */
6664         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6665         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6666         .emit_fence = gfx_v9_0_ring_emit_fence,
6667         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6668         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6669         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6670         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6671         .test_ring = gfx_v9_0_ring_test_ring,
6672         .test_ib = gfx_v9_0_ring_test_ib,
6673         .insert_nop = amdgpu_ring_insert_nop,
6674         .pad_ib = amdgpu_ring_generic_pad_ib,
6675         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6676         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6677         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6678         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6679         .emit_tmz = gfx_v9_0_ring_emit_tmz,
6680         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6681         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6682         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6683         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6684 };
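
/*
 * Note (editorial): emit_frame_size is a worst-case dword budget for the
 * non-IB packets a single submission may emit; the ring core sizes and
 * reserves ring space from it, so each emit_* callback above must stay
 * within the count attributed to it in the sum.
 */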
6685
6686 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6687         .type = AMDGPU_RING_TYPE_COMPUTE,
6688         .align_mask = 0xff,
6689         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6690         .support_64bit_ptrs = true,
6691         .vmhub = AMDGPU_GFXHUB_0,
6692         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6693         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6694         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6695         .emit_frame_size =
6696                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6697                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6698                 5 + /* hdp invalidate */
6699                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6700                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6701                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6702                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6703                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6704         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6705         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6706         .emit_fence = gfx_v9_0_ring_emit_fence,
6707         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6708         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6709         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6710         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6711         .test_ring = gfx_v9_0_ring_test_ring,
6712         .test_ib = gfx_v9_0_ring_test_ib,
6713         .insert_nop = amdgpu_ring_insert_nop,
6714         .pad_ib = amdgpu_ring_generic_pad_ib,
6715         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6716         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6717         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6718 };
6719
6720 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6721         .type = AMDGPU_RING_TYPE_KIQ,
6722         .align_mask = 0xff,
6723         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6724         .support_64bit_ptrs = true,
6725         .vmhub = AMDGPU_GFXHUB_0,
6726         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6727         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6728         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6729         .emit_frame_size =
6730                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6731                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6732                 5 + /* hdp invalidate */
6733                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6734                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6735                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6736                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6737                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6738         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6739         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6740         .test_ring = gfx_v9_0_ring_test_ring,
6741         .insert_nop = amdgpu_ring_insert_nop,
6742         .pad_ib = amdgpu_ring_generic_pad_ib,
6743         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6744         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6745         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6746         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6747 };
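
/*
 * Note (editorial): the KIQ ring deliberately has no emit_ib or
 * emit_vm_flush callback; it is a kernel-internal queue used for things
 * like register access via emit_rreg/emit_wreg and compute queue
 * map/unmap, never for userspace IB submission.
 */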
6748
6749 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6750 {
6751         int i;
6752
6753         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6754
6755         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6756                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6757
6758         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6759                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6760 }
6761
6762 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6763         .set = gfx_v9_0_set_eop_interrupt_state,
6764         .process = gfx_v9_0_eop_irq,
6765 };
6766
6767 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6768         .set = gfx_v9_0_set_priv_reg_fault_state,
6769         .process = gfx_v9_0_priv_reg_irq,
6770 };
6771
6772 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6773         .set = gfx_v9_0_set_priv_inst_fault_state,
6774         .process = gfx_v9_0_priv_inst_irq,
6775 };
6776
6777 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6778         .set = gfx_v9_0_set_cp_ecc_error_state,
6779         .process = amdgpu_gfx_cp_ecc_error_irq,
6780 };
6781
6782
6783 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6784 {
6785         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6786         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6787
6788         adev->gfx.priv_reg_irq.num_types = 1;
6789         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6790
6791         adev->gfx.priv_inst_irq.num_types = 1;
6792         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6793
6794         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6795         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6796 }
6797
6798 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6799 {
6800         switch (adev->asic_type) {
6801         case CHIP_VEGA10:
6802         case CHIP_VEGA12:
6803         case CHIP_VEGA20:
6804         case CHIP_RAVEN:
6805         case CHIP_ARCTURUS:
6806         case CHIP_RENOIR:
6807                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6808                 break;
6809         default:
6810                 break;
6811         }
6812 }
6813
6814 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6815 {
6816         /* init asic gds info */
6817         switch (adev->asic_type) {
6818         case CHIP_VEGA10:
6819         case CHIP_VEGA12:
6820         case CHIP_VEGA20:
6821                 adev->gds.gds_size = 0x10000;
6822                 break;
6823         case CHIP_RAVEN:
6824         case CHIP_ARCTURUS:
6825                 adev->gds.gds_size = 0x1000;
6826                 break;
6827         default:
6828                 adev->gds.gds_size = 0x10000;
6829                 break;
6830         }
6831
6832         switch (adev->asic_type) {
6833         case CHIP_VEGA10:
6834         case CHIP_VEGA20:
6835                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6836                 break;
6837         case CHIP_VEGA12:
6838                 adev->gds.gds_compute_max_wave_id = 0x27f;
6839                 break;
6840         case CHIP_RAVEN:
6841                 if (adev->rev_id >= 0x8)
6842                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6843                 else
6844                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6845                 break;
6846         case CHIP_ARCTURUS:
6847                 adev->gds.gds_compute_max_wave_id = 0xfff;
6848                 break;
6849         default:
6850                 /* this really depends on the chip */
6851                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6852                 break;
6853         }
6854
6855         adev->gds.gws_size = 64;
6856         adev->gds.oa_size = 16;
6857 }
6858
6859 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6860                                                  u32 bitmap)
6861 {
6862         u32 data;
6863
6864         if (!bitmap)
6865                 return;
6866
6867         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6868         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6869
6870         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6871 }
6872
6873 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6874 {
6875         u32 data, mask;
6876
6877         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6878         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6879
6880         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6881         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6882
6883         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6884
6885         return (~data) & mask;
6886 }
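
/*
 * Worked example (editorial): with max_cu_per_sh = 11,
 * amdgpu_gfx_create_bitmask() yields (1 << 11) - 1 = 0x7ff; if the
 * combined INACTIVE_CUS field reads 0x3 (CUs 0 and 1 disabled), the
 * active bitmap is (~0x3) & 0x7ff = 0x7fc.
 */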
6887
6888 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6889                                  struct amdgpu_cu_info *cu_info)
6890 {
6891         int i, j, k, counter, active_cu_number = 0;
6892         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6893         unsigned disable_masks[4 * 4];
6894
6895         if (!adev || !cu_info)
6896                 return -EINVAL;
6897
6898         /*
6899          * 16 comes from the 4x4 bitmap array size, which covers all gfx9 ASICs
6900          */
6901         if (adev->gfx.config.max_shader_engines *
6902                 adev->gfx.config.max_sh_per_se > 16)
6903                 return -EINVAL;
6904
6905         amdgpu_gfx_parse_disable_cu(disable_masks,
6906                                     adev->gfx.config.max_shader_engines,
6907                                     adev->gfx.config.max_sh_per_se);
6908
6909         mutex_lock(&adev->grbm_idx_mutex);
6910         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6911                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6912                         mask = 1;
6913                         ao_bitmap = 0;
6914                         counter = 0;
6915                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6916                         gfx_v9_0_set_user_cu_inactive_bitmap(
6917                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6918                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6919
6920                         /*
6921                          * The bitmap (and ao_cu_bitmap) in the cu_info
6922                          * structure is a 4x4 array, which suits Vega ASICs
6923                          * with their 4x2 SE/SH layout.
6924                          * Arcturus, however, uses an 8x1 SE/SH layout.
6925                          * To minimize the impact, we map it onto the current
6926                          * bitmap array as below:
6927                          *    SE4,SH0 --> bitmap[0][1]
6928                          *    SE5,SH0 --> bitmap[1][1]
6929                          *    SE6,SH0 --> bitmap[2][1]
6930                          *    SE7,SH0 --> bitmap[3][1]
6931                          */
6932                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6933
6934                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6935                                 if (bitmap & mask) {
6936                                         if (counter < adev->gfx.config.max_cu_per_sh)
6937                                                 ao_bitmap |= mask;
6938                                         counter++;
6939                                 }
6940                                 mask <<= 1;
6941                         }
6942                         active_cu_number += counter;
6943                         if (i < 2 && j < 2)
6944                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
6945                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
6946                 }
6947         }
6948         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6949         mutex_unlock(&adev->grbm_idx_mutex);
6950
6951         cu_info->number = active_cu_number;
6952         cu_info->ao_cu_mask = ao_cu_mask;
6953         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6954
6955         return 0;
6956 }
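
/*
 * Worked example (editorial): for Arcturus' 8x1 layout, SE5/SH0 means
 * i = 5, j = 0, so the stores above land in
 * cu_info->bitmap[5 % 4][0 + 5 / 4] = bitmap[1][1], matching the mapping
 * table in the loop comment.
 */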
6957
6958 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
6959 {
6960         .type = AMD_IP_BLOCK_TYPE_GFX,
6961         .major = 9,
6962         .minor = 0,
6963         .rev = 0,
6964         .funcs = &gfx_v9_0_ip_funcs,
6965 };