/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"

/*
 * Navi10 has two graphics rings to share each graphics pipe.
 */
#define GFX10_NUM_GFX_RINGS_NV1X	1
#define GFX10_MEC_HPD_SIZE		2048

#define F32_CE_PROGRAM_RAM_SIZE			65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

#define mmCGTT_GS_NGG_CLK_CTRL		0x5087
#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi14_me.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi12_me.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};

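/*
 * Indirect register write through the RLC firmware (RLCG): the value goes
 * into SCRATCH_REG0, the target offset (with bit 31 set as a "pending"
 * flag) into SCRATCH_REG1, then RLC_SPARE_INT is rung to notify the RLC,
 * which performs the write and clears the flag. The loop below polls the
 * flag until the write completes or the retry budget is exhausted.
 */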
static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;
	static uint32_t grbm_cntl;
	static uint32_t grbm_idx;
	uint32_t i = 0;
	uint32_t retries = 50000;

	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

	if (amdgpu_sriov_runtime(adev)) {
		pr_err("shouldn't call rlcg write register during runtime\n");
		return;
	}

	writel(v, scratch_reg0);
	writel(offset | 0x80000000, scratch_reg1);
	writel(1, spare_int);
	for (i = 0; i < retries; i++) {
		u32 tmp;

		tmp = readl(scratch_reg1);
		if (!(tmp & 0x80000000))
			break;

		udelay(10);
	}

	if (i >= retries)
		pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
}

static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
	/* Pending on emulation bring up */
};

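/*
 * Default SH_MEM_CONFIG: 64-bit address mode, unaligned buffer accesses
 * allowed, retry on all translation faults, and maximum initial
 * instruction prefetch.
 */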
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);

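/*
 * KIQ (Kernel Interface Queue) PM4 helpers. The KIQ is a privileged
 * compute queue through which the driver asks the CP firmware to map,
 * unmap, and query the hardware queues backing the other rings.
 */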
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	case CHIP_NAVI14:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_nv14,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
		break;
	case CHIP_NAVI12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2_nv12,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
		break;
	default:
		break;
	}
}

static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

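/*
 * Emit a WAIT_REG_MEM packet: the selected engine polls a register
 * (mem_space = 0) or a memory location (mem_space = 1) until
 * (value & mask) compares equal to ref, re-checking every "inv" cycles.
 */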
static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

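/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, ask
 * the CP to overwrite it with 0xDEADBEEF via SET_UCONFIG_REG, and poll
 * until the new value shows up or the timeout expires.
 */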
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

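/*
 * Indirect buffer test: submit a small IB whose WRITE_DATA packet stores
 * 0xDEADBEEF into a writeback slot, wait on the resulting fence, and
 * verify that the value landed in memory.
 */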
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.cp_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
		if ((adev->gfx.me_fw_version >= 0x00000046) &&
		    (adev->gfx.me_feature_version >= 27) &&
		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
		    (adev->gfx.pfp_feature_version >= 27) &&
		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
		    (adev->gfx.mec_feature_version >= 27))
			adev->gfx.cp_fw_write_wait = true;
		break;
	default:
		break;
	}

	if (!adev->gfx.cp_fw_write_wait)
		DRM_WARN_ONCE("CP firmware version too old, please update!");
}

static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
	bool ret = false;

	switch (adev->pdev->revision) {
	case 0xc2:
	case 0xc3:
		ret = true;
		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}

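/*
 * Fetch and validate all CP (PFP/ME/CE/MEC) and RLC firmware images for
 * the detected chip, record their versions, and, when the PSP loads
 * firmware, register each image in the ucode table so it can be uploaded
 * later.
 */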
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	char wks[10];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	memset(wks, 0, sizeof(wks));
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		if (!(adev->pdev->device == 0x7340 &&
		      adev->pdev->revision != 0x00))
			snprintf(wks, sizeof(wks), "_wks");
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		if (version_major == 2 && version_minor == 1)
			adev->gfx.rlc.is_rlc_v2_1 = true;

		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
		adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
		adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
		adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
		adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
		adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
		adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
		adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
		adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
		adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
		adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
		if (!adev->gfx.rlc.register_list_format) {
			err = -ENOMEM;
			goto out;
		}

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
			adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

		adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

		if (adev->gfx.rlc.is_rlc_v2_1)
			gfx_v10_0_init_rlc_ext_microcode(adev);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			 adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}

	gfx_v10_0_check_fw_write_wait(adev);

out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}

static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

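/*
 * Build the clear-state indirect buffer. The packet sequence mirrors the
 * sizing done in gfx_v10_0_get_csb_size() above: preamble begin, context
 * control, all SECT_CONTEXT register extents, the PA_SC_TILE_STEERING
 * override, preamble end, and a final CLEAR_STATE packet.
 */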
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned int fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, mec_hpd_size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

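/*
 * SQ_IND_INDEX/SQ_IND_DATA form an indexed register pair: with AUTO_INCR
 * set, each SQ_IND_DATA read returns the next register of the selected
 * wave, so a whole range can be streamed out.
 */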
static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * de-selected here; the simd argument is unused.
	 */
	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}

static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};

static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}

static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;
	return 0;
}

static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned int irq_type;
	struct amdgpu_ring *ring;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX10_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	case CHIP_NAVI12:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		break;
	}

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}

static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}

static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}

static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

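/*
 * Walk every shader engine / shader array, collect each one's render
 * backend (RB) bitmap, and fold them into a single global enable mask.
 */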
static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		 adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
	}
}

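/*
 * When WGPs are harvested, the TCP and SQC clients belonging to the
 * inactive WGPs must also be masked out of the GCRD target list and the
 * UTCL1/UTCL0 invalidation-request routing; the per-ASIC field layouts
 * are described in the comments inside the function below.
 */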
1690 static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
1693 int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
1694 u32 tmp, wgp_active_bitmap = 0;
1695 u32 gcrd_targets_disable_tcp = 0;
1696 u32 utcl_invreq_disable = 0;
1698 * GCRD_TARGETS_DISABLE field contains
1699 * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
1700 * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
1702 u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
1703 2 * max_wgp_per_sh + /* TCP */
1704 max_wgp_per_sh + /* SQC */
1707 * UTCL1_UTCL0_INVREQ_DISABLE field contains
1708 * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
1709 * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
1711 u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
1712 2 * max_wgp_per_sh + /* TCP */
1713 2 * max_wgp_per_sh + /* SQC */
1717 if (adev->asic_type == CHIP_NAVI10 ||
1718 adev->asic_type == CHIP_NAVI14 ||
1719 adev->asic_type == CHIP_NAVI12) {
1720 mutex_lock(&adev->grbm_idx_mutex);
1721 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1722 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1723 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
1724 wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
1726 * Set corresponding TCP bits for the inactive WGPs in
1727 * GCRD_SA_TARGETS_DISABLE
1729 gcrd_targets_disable_tcp = 0;
1730 /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
1731 utcl_invreq_disable = 0;
1733 for (k = 0; k < max_wgp_per_sh; k++) {
1734 if (!(wgp_active_bitmap & (1 << k))) {
1735 gcrd_targets_disable_tcp |= 3 << (2 * k);
1736 utcl_invreq_disable |= (3 << (2 * k)) |
1737 (3 << (2 * (max_wgp_per_sh + k)));
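/* e.g. with max_wgp_per_sh = 5, an inactive WGP k disables TCP
 * bits [2k+1:2k] and SQC bits [2k+11:2k+10], the SQC pairs
 * starting at bit 2 * max_wgp_per_sh */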
1741 tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
1742 /* only override TCP & SQC bits */
1743 tmp &= 0xffffffff << (4 * max_wgp_per_sh);
1744 tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
1745 WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
1747 tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
1748 /* only override TCP bits */
1749 tmp &= 0xffffffff << (2 * max_wgp_per_sh);
1750 tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
1751 WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
1755 gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1756 mutex_unlock(&adev->grbm_idx_mutex);
1760 static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
1762 /* TCCs are global (not instanced). */
1763 uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
1764 RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
1766 adev->gfx.config.tcc_disabled_mask =
1767 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1768 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1771 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
1776 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1778 gfx_v10_0_tiling_mode_table_init(adev);
1780 gfx_v10_0_setup_rb(adev);
1781 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
1782 gfx_v10_0_get_tcc_info(adev);
1783 adev->gfx.config.pa_sc_tile_steering_override =
1784 gfx_v10_0_init_pa_sc_tile_steering_override(adev);
1786 /* XXX SH_MEM regs */
1787 /* where to put LDS, scratch, GPUVM in FSA64 space */
1788 mutex_lock(&adev->srbm_mutex);
1789 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
1790 nv_grbm_select(adev, 0, 0, 0, i);
1791 /* CP and shaders */
1792 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1794 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1795 (adev->gmc.private_aperture_start >> 48));
1796 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1797 (adev->gmc.shared_aperture_start >> 48));
1798 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1801 nv_grbm_select(adev, 0, 0, 0, 0);
1803 mutex_unlock(&adev->srbm_mutex);
1805 gfx_v10_0_init_compute_vmid(adev);
1806 gfx_v10_0_init_gds_vmid(adev);
1810 static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1813 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1815 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1817 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1819 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1821 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1824 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1827 static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
1829 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1832 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
1833 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1834 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
1835 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1836 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1841 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
1843 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1845 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1846 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1849 static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
1851 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1853 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1857 static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1860 uint32_t rlc_pg_cntl;
1862 rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
1865 /* RLC_PG_CNTL[23] = 0 (default)
1866 * RLC will wait for handshake acks with SMU
1867 * GFXOFF will be enabled
1868 * RLC_PG_CNTL[23] = 1
1869 * RLC will not issue any message to SMU
1870 * hence no handshake between SMU & RLC
1871 * GFXOFF will be disabled
1873 rlc_pg_cntl |= 0x800000;
1875 rlc_pg_cntl &= ~0x800000;
1876 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
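/* 0x800000 is RLC_PG_CNTL bit 23, the SMU handshake control
 * described in the comment above */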
1879 static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
1881 /* TODO: enable rlc & smu handshake until the smu
1882 * and gfxoff features work as expected */
1883 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1884 gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
1886 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1890 static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
1894 /* enable Save Restore Machine */
1895 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1896 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1897 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1898 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1901 static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
1903 const struct rlc_firmware_header_v2_0 *hdr;
1904 const __le32 *fw_data;
1905 unsigned i, fw_size;
1907 if (!adev->gfx.rlc_fw)
1910 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1911 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1913 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1914 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1915 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1917 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
1918 RLCG_UCODE_LOADING_START_ADDRESS);
1920 for (i = 0; i < fw_size; i++)
1921 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
1922 le32_to_cpup(fw_data++));
1924 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1929 static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
1933 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1935 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1939 gfx_v10_0_init_csb(adev);
1941 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1942 gfx_v10_0_rlc_enable_srm(adev);
1944 if (amdgpu_sriov_vf(adev)) {
1945 gfx_v10_0_init_csb(adev);
1949 adev->gfx.rlc.funcs->stop(adev);
1952 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
1955 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
1957 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1958 /* legacy rlc firmware loading */
1959 r = gfx_v10_0_rlc_load_microcode(adev);
1962 } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1963 /* rlc backdoor autoload firmware */
1964 r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
1969 gfx_v10_0_init_csb(adev);
1971 adev->gfx.rlc.funcs->start(adev);
1973 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1974 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1984 unsigned int offset;
1986 } rlc_autoload_info[FIRMWARE_ID_MAX];
1988 static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
1991 RLC_TABLE_OF_CONTENT *rlc_toc;
1993 ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
1994 AMDGPU_GEM_DOMAIN_GTT,
1995 &adev->gfx.rlc.rlc_toc_bo,
1996 &adev->gfx.rlc.rlc_toc_gpu_addr,
1997 (void **)&adev->gfx.rlc.rlc_toc_buf);
1999 dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
2003 /* Copy toc from psp sos fw to rlc toc buffer */
2004 memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
2006 rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
2007 while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
2008 (rlc_toc->id < FIRMWARE_ID_MAX)) {
2009 if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
2010 (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
2011 /* Offset needs 4KB alignment */
2012 rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
2015 rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
2016 rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
2017 rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
2025 static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
2027 uint32_t total_size = 0;
2031 ret = gfx_v10_0_parse_rlc_toc(adev);
2033 dev_err(adev->dev, "failed to parse rlc toc\n");
2037 for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
2038 total_size += rlc_autoload_info[id].size;
2040 /* In case the toc offsets were aligned up, the summed sizes may undershoot; use the last entry's offset + size instead */
2041 if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
2042 total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
2043 rlc_autoload_info[FIRMWARE_ID_MAX-1].size;
2048 static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
2051 uint32_t total_size;
2053 total_size = gfx_v10_0_calc_toc_total_size(adev);
2055 r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
2056 AMDGPU_GEM_DOMAIN_GTT,
2057 &adev->gfx.rlc.rlc_autoload_bo,
2058 &adev->gfx.rlc.rlc_autoload_gpu_addr,
2059 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
2061 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
2068 static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
2070 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
2071 &adev->gfx.rlc.rlc_toc_gpu_addr,
2072 (void **)&adev->gfx.rlc.rlc_toc_buf);
2073 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
2074 &adev->gfx.rlc.rlc_autoload_gpu_addr,
2075 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
2078 static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
2080 const void *fw_data,
2083 uint32_t toc_offset;
2084 uint32_t toc_fw_size;
2085 char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
2087 if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
2090 toc_offset = rlc_autoload_info[id].offset;
2091 toc_fw_size = rlc_autoload_info[id].size;
2094 fw_size = toc_fw_size;
2096 if (fw_size > toc_fw_size)
2097 fw_size = toc_fw_size;
2099 memcpy(ptr + toc_offset, fw_data, fw_size);
2101 if (fw_size < toc_fw_size)
2102 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
2105 static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
2110 data = adev->gfx.rlc.rlc_toc_buf;
2111 size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;
2113 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2114 FIRMWARE_ID_RLC_TOC,
2118 static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
2120 const __le32 *fw_data;
2122 const struct gfx_firmware_header_v1_0 *cp_hdr;
2123 const struct rlc_firmware_header_v2_0 *rlc_hdr;
2126 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2127 adev->gfx.pfp_fw->data;
2128 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2129 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2130 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2131 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2136 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2137 adev->gfx.ce_fw->data;
2138 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2139 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2140 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2141 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2146 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2147 adev->gfx.me_fw->data;
2148 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2149 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2150 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2151 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2156 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
2157 adev->gfx.rlc_fw->data;
2158 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2159 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
2160 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
2161 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2162 FIRMWARE_ID_RLC_G_UCODE,
2166 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2167 adev->gfx.mec_fw->data;
2168 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
2169 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2170 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
2171 cp_hdr->jt_size * 4;
2172 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2175 /* mec2 ucode is not necessary if it is the same as mec1 */
2178 /* Temporarily put sdma part here */
2179 static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
2181 const __le32 *fw_data;
2183 const struct sdma_firmware_header_v1_0 *sdma_hdr;
2186 for (i = 0; i < adev->sdma.num_instances; i++) {
2187 sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
2188 adev->sdma.instance[i].fw->data;
2189 fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
2190 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
2191 fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);
2194 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2195 FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
2196 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2197 FIRMWARE_ID_SDMA0_JT,
2198 (uint32_t *)fw_data +
2199 sdma_hdr->jt_offset,
2200 sdma_hdr->jt_size * 4);
2201 } else if (i == 1) {
2202 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2203 FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
2204 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2205 FIRMWARE_ID_SDMA1_JT,
2206 (uint32_t *)fw_data +
2207 sdma_hdr->jt_offset,
2208 sdma_hdr->jt_size * 4);
2213 static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
2215 uint32_t rlc_g_offset, rlc_g_size, tmp;
2218 gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
2219 gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
2220 gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
2222 rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
2223 rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
2224 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
2226 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
2227 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
2228 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);
2230 tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
2231 if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
2232 RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
2233 DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
2237 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2238 if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
2239 DRM_ERROR("RLC ROM should halt itself\n");
2246 static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
2248 uint32_t usec_timeout = 50000; /* wait for 50ms */
2253 /* Trigger an invalidation of the L1 instruction caches */
2254 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2255 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2256 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2258 /* Wait for invalidation complete */
2259 for (i = 0; i < usec_timeout; i++) {
2260 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2261 if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2262 INVALIDATE_CACHE_COMPLETE) == 1)
2267 if (i >= usec_timeout) {
2268 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2272 /* Program me ucode address into instruction cache address register */
2273 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2274 rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
2275 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2276 lower_32_bits(addr) & 0xFFFFF000);
2277 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2278 upper_32_bits(addr));
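/* the low 12 bits are masked off above because the instruction
 * cache base must be 4KB aligned */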
2283 static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
2285 uint32_t usec_timeout = 50000; /* wait for 50ms */
2290 /* Trigger an invalidation of the L1 instruction caches */
2291 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2292 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2293 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2295 /* Wait for invalidation complete */
2296 for (i = 0; i < usec_timeout; i++) {
2297 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2298 if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2299 INVALIDATE_CACHE_COMPLETE) == 1)
2304 if (i >= usec_timeout) {
2305 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2309 /* Program ce ucode address into instruction cache address register */
2310 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2311 rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
2312 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2313 lower_32_bits(addr) & 0xFFFFF000);
2314 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2315 upper_32_bits(addr));
2320 static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
2322 uint32_t usec_timeout = 50000; /* wait for 50ms */
2327 /* Trigger an invalidation of the L1 instruction caches */
2328 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2329 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2330 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2332 /* Wait for invalidation complete */
2333 for (i = 0; i < usec_timeout; i++) {
2334 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2335 if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2336 INVALIDATE_CACHE_COMPLETE) == 1)
2341 if (i >= usec_timeout) {
2342 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2346 /* Program pfp ucode address into instruction cache address register */
2347 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2348 rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
2349 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2350 lower_32_bits(addr) & 0xFFFFF000);
2351 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2352 upper_32_bits(addr));
2357 static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
2359 uint32_t usec_timeout = 50000; /* wait for 50ms */
2364 /* Trigger an invalidation of the L1 instruction caches */
2365 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2366 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2367 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2369 /* Wait for invalidation complete */
2370 for (i = 0; i < usec_timeout; i++) {
2371 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2372 if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2373 INVALIDATE_CACHE_COMPLETE) == 1)
2378 if (i >= usec_timeout) {
2379 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2383 /* Program mec1 ucode address into instruction cache address register */
2384 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2385 rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
2386 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2387 lower_32_bits(addr) & 0xFFFFF000);
2388 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2389 upper_32_bits(addr));
2394 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2397 uint32_t bootload_status;
2400 for (i = 0; i < adev->usec_timeout; i++) {
2401 cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
2402 bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
2403 if ((cp_status == 0) &&
2404 (REG_GET_FIELD(bootload_status,
2405 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2411 if (i >= adev->usec_timeout) {
2412 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2416 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2417 r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
2421 r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
2425 r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
2429 r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
2437 static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2440 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2442 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2443 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2444 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2446 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2447 adev->gfx.gfx_ring[i].sched.ready = false;
2449 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
2451 for (i = 0; i < adev->usec_timeout; i++) {
2452 if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
2457 if (i >= adev->usec_timeout)
2458 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2463 static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2466 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2467 const __le32 *fw_data;
2468 unsigned i, fw_size;
2470 uint32_t usec_timeout = 50000; /* wait for 50ms */
2472 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2473 adev->gfx.pfp_fw->data;
2475 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2477 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2478 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2479 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2481 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2482 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2483 &adev->gfx.pfp.pfp_fw_obj,
2484 &adev->gfx.pfp.pfp_fw_gpu_addr,
2485 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2487 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2488 gfx_v10_0_pfp_fini(adev);
2492 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2494 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2495 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2497 /* Trigger an invalidation of the L1 instruction caches */
2498 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2499 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2500 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2502 /* Wait for invalidation complete */
2503 for (i = 0; i < usec_timeout; i++) {
2504 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2505 if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2506 INVALIDATE_CACHE_COMPLETE) == 1)
2511 if (i >= usec_timeout) {
2512 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2516 if (amdgpu_emu_mode == 1)
2517 adev->nbio.funcs->hdp_flush(adev, NULL);
2519 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
2520 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2521 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2522 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2523 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2524 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
2525 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2526 adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
2527 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2528 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2533 static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
2536 const struct gfx_firmware_header_v1_0 *ce_hdr;
2537 const __le32 *fw_data;
2538 unsigned i, fw_size;
2540 uint32_t usec_timeout = 50000; /* wait for 50ms */
2542 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2543 adev->gfx.ce_fw->data;
2545 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2547 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2548 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2549 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);
2551 r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
2552 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2553 &adev->gfx.ce.ce_fw_obj,
2554 &adev->gfx.ce.ce_fw_gpu_addr,
2555 (void **)&adev->gfx.ce.ce_fw_ptr);
2557 dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
2558 gfx_v10_0_ce_fini(adev);
2562 memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);
2564 amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
2565 amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);
2567 /* Trigger an invalidation of the L1 instruction caches */
2568 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2569 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2570 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2572 /* Wait for invalidation complete */
2573 for (i = 0; i < usec_timeout; i++) {
2574 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2575 if (REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2576 INVALIDATE_CACHE_COMPLETE) == 1)
2581 if (i >= usec_timeout) {
2582 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2586 if (amdgpu_emu_mode == 1)
2587 adev->nbio.funcs->hdp_flush(adev, NULL);
2589 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
2590 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
2591 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
2592 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
2593 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2594 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2595 adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
2596 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2597 upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
2602 static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2605 const struct gfx_firmware_header_v1_0 *me_hdr;
2606 const __le32 *fw_data;
2607 unsigned i, fw_size;
2609 uint32_t usec_timeout = 50000; /* wait for 50ms */
2611 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2612 adev->gfx.me_fw->data;
2614 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2616 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2617 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2618 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2620 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2621 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2622 &adev->gfx.me.me_fw_obj,
2623 &adev->gfx.me.me_fw_gpu_addr,
2624 (void **)&adev->gfx.me.me_fw_ptr);
2626 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2627 gfx_v10_0_me_fini(adev);
2631 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2633 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2634 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2636 /* Trigger an invalidation of the L1 instruction caches */
2637 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2638 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2639 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2641 /* Wait for invalidation complete */
2642 for (i = 0; i < usec_timeout; i++) {
2643 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2644 if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2645 INVALIDATE_CACHE_COMPLETE) == 1)
2650 if (i >= usec_timeout) {
2651 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2655 if (amdgpu_emu_mode == 1)
2656 adev->nbio.funcs->hdp_flush(adev, NULL);
2658 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
2659 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2660 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2661 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2662 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2663 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2664 adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
2665 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2666 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2671 static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2675 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2678 gfx_v10_0_cp_gfx_enable(adev, false);
2680 r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
2682 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
2686 r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
2688 dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
2692 r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
2694 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
2701 static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
2703 struct amdgpu_ring *ring;
2704 const struct cs_section_def *sect = NULL;
2705 const struct cs_extent_def *ext = NULL;
2710 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
2711 adev->gfx.config.max_hw_contexts - 1);
2712 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2714 gfx_v10_0_cp_gfx_enable(adev, true);
2716 ring = &adev->gfx.gfx_ring[0];
2717 r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
2719 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2723 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2724 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2726 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2727 amdgpu_ring_write(ring, 0x80000000);
2728 amdgpu_ring_write(ring, 0x80000000);
2730 for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
2731 for (ext = sect->section; ext->extent != NULL; ++ext) {
2732 if (sect->id == SECT_CONTEXT) {
2733 amdgpu_ring_write(ring,
2734 PACKET3(PACKET3_SET_CONTEXT_REG,
2736 amdgpu_ring_write(ring, ext->reg_index -
2737 PACKET3_SET_CONTEXT_REG_START);
2738 for (i = 0; i < ext->reg_count; i++)
2739 amdgpu_ring_write(ring, ext->extent[i]);
2745 SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
2746 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
2747 amdgpu_ring_write(ring, ctx_reg_offset);
2748 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
2750 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2751 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2753 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2754 amdgpu_ring_write(ring, 0);
2756 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2757 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2758 amdgpu_ring_write(ring, 0x8000);
2759 amdgpu_ring_write(ring, 0x8000);
2761 amdgpu_ring_commit(ring);
2763 /* submit cs packet to copy state 0 to next available state */
2764 if (adev->gfx.num_gfx_rings > 1) {
2765 /* maximum supported gfx ring is 2 */
2766 ring = &adev->gfx.gfx_ring[1];
2767 r = amdgpu_ring_alloc(ring, 2);
2769 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2773 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2774 amdgpu_ring_write(ring, 0);
2776 amdgpu_ring_commit(ring);
2781 static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2786 tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
2787 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2789 WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
2792 static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2793 struct amdgpu_ring *ring)
2797 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2798 if (ring->use_doorbell) {
2799 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2800 DOORBELL_OFFSET, ring->doorbell_index);
2801 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2804 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2807 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2808 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2809 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2810 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2812 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2813 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2816 static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
2818 struct amdgpu_ring *ring;
2821 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2824 /* Set the write pointer delay */
2825 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2827 /* set the RB to use vmid 0 */
2828 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2830 /* Init gfx ring 0 for pipe 0 */
2831 mutex_lock(&adev->srbm_mutex);
2832 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2834 /* Set ring buffer size */
2835 ring = &adev->gfx.gfx_ring[0];
2836 rb_bufsz = order_base_2(ring->ring_size / 8);
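/* ring_size is in bytes; RB_BUFSZ is derived as log2 of the size in
 * 8-byte units, so the ring presumably holds 2^(RB_BUFSZ+1) dwords */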
2837 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2838 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2840 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2842 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2844 /* Initialize the ring buffer's write pointers */
2846 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2847 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2849 /* set the wb address whether it's enabled or not */
2850 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2851 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2852 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2853 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2855 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2856 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2857 lower_32_bits(wptr_gpu_addr));
2858 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2859 upper_32_bits(wptr_gpu_addr));
2862 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2864 rb_addr = ring->gpu_addr >> 8;
2865 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2866 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2868 WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
2870 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2871 mutex_unlock(&adev->srbm_mutex);
2873 /* Init gfx ring 1 for pipe 1 */
2874 if (adev->gfx.num_gfx_rings > 1) {
2875 mutex_lock(&adev->srbm_mutex);
2876 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
2877 /* maximum supported gfx ring is 2 */
2878 ring = &adev->gfx.gfx_ring[1];
2879 rb_bufsz = order_base_2(ring->ring_size / 8);
2880 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
2881 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
2882 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2883 /* Initialize the ring buffer's write pointers */
2885 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2886 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
2887 /* Set the wb address whether it's enabled or not */
2888 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2889 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2890 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2891 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2892 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2893 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2894 lower_32_bits(wptr_gpu_addr));
2895 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2896 upper_32_bits(wptr_gpu_addr));
2899 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2901 rb_addr = ring->gpu_addr >> 8;
2902 WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
2903 WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
2904 WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
2906 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2907 mutex_unlock(&adev->srbm_mutex);
2909 /* Switch to pipe 0 */
2910 mutex_lock(&adev->srbm_mutex);
2911 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2912 mutex_unlock(&adev->srbm_mutex);
2914 /* start the ring */
2915 gfx_v10_0_cp_gfx_start(adev);
2917 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2918 ring = &adev->gfx.gfx_ring[i];
2919 ring->sched.ready = true;
2925 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2930 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2932 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2933 (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2934 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2935 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2936 adev->gfx.compute_ring[i].sched.ready = false;
2937 adev->gfx.kiq.ring.sched.ready = false;
2942 static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2944 const struct gfx_firmware_header_v1_0 *mec_hdr;
2945 const __le32 *fw_data;
2948 u32 usec_timeout = 50000; /* Wait for 50 ms */
2950 if (!adev->gfx.mec_fw)
2953 gfx_v10_0_cp_compute_enable(adev, false);
2955 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2956 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2958 fw_data = (const __le32 *)
2959 (adev->gfx.mec_fw->data +
2960 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2962 /* Trigger an invalidation of the L1 instruction caches */
2963 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2964 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2965 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2967 /* Wait for invalidation complete */
2968 for (i = 0; i < usec_timeout; i++) {
2969 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2970 if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2971 INVALIDATE_CACHE_COMPLETE) == 1)
2976 if (i >= usec_timeout) {
2977 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2981 if (amdgpu_emu_mode == 1)
2982 adev->nbio.funcs->hdp_flush(adev, NULL);
2984 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
2985 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2986 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2987 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2988 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2990 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
2992 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2993 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2996 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);
2998 for (i = 0; i < mec_hdr->jt_size; i++)
2999 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3000 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3002 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3005 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
3006 * different microcode than MEC1.
3012 static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
3015 struct amdgpu_device *adev = ring->adev;
3017 /* tell RLC which queue is the KIQ */
3018 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3020 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
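/* the shifts encode the kiq slot as queue in bits [2:0], pipe in
 * bits [4:3] and me starting at bit 5 */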
3021 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3023 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3026 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
3028 struct amdgpu_device *adev = ring->adev;
3029 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3030 uint64_t hqd_gpu_addr, wb_gpu_addr;
3034 /* set up gfx hqd wptr */
3035 mqd->cp_gfx_hqd_wptr = 0;
3036 mqd->cp_gfx_hqd_wptr_hi = 0;
3038 /* set the pointer to the MQD */
3039 mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
3040 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3042 /* set up mqd control */
3043 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
3044 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3045 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3046 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3047 mqd->cp_gfx_mqd_control = tmp;
3049 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3050 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
3051 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3052 mqd->cp_gfx_hqd_vmid = 0;
3054 /* set up default queue priority level
3055 * 0x0 = low priority, 0x1 = high priority */
3056 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
3057 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3058 mqd->cp_gfx_hqd_queue_priority = tmp;
3060 /* set up time quantum */
3061 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
3062 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3063 mqd->cp_gfx_hqd_quantum = tmp;
3065 /* set up gfx hqd base. this is similar to CP_RB_BASE */
3066 hqd_gpu_addr = ring->gpu_addr >> 8;
3067 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3068 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3070 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3071 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3072 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3073 mqd->cp_gfx_hqd_rptr_addr_hi =
3074 upper_32_bits(wb_gpu_addr) & 0xffff;
3076 /* set up rb_wptr_poll addr */
3077 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3078 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3079 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3081 /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3082 rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
3083 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
3084 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3085 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3087 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3089 mqd->cp_gfx_hqd_cntl = tmp;
3091 /* set up cp_doorbell_control */
3092 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3093 if (ring->use_doorbell) {
3094 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3095 DOORBELL_OFFSET, ring->doorbell_index);
3096 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3099 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3101 mqd->cp_rb_doorbell_control = tmp;
3103 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3105 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
3107 /* activate the queue */
3108 mqd->cp_gfx_hqd_active = 1;
3113 #ifdef BRING_UP_DEBUG
3114 static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3116 struct amdgpu_device *adev = ring->adev;
3117 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3119 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3120 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3121 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3123 /* set GFX_MQD_BASE */
3124 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3125 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3127 /* set GFX_MQD_CONTROL */
3128 WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3130 /* set GFX_HQD_VMID to 0 */
3131 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3133 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
3134 mqd->cp_gfx_hqd_queue_priority);
3135 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3137 /* set GFX_HQD_BASE, similar as CP_RB_BASE */
3138 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3139 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3141 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3142 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3143 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3145 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3146 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3148 /* set RB_WPTR_POLL_ADDR */
3149 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3150 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3152 /* set RB_DOORBELL_CONTROL */
3153 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3155 /* activate the queue */
3156 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3162 static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
3164 struct amdgpu_device *adev = ring->adev;
3165 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3166 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3168 if (!adev->in_gpu_reset && !adev->in_suspend) {
3169 memset((void *)mqd, 0, sizeof(*mqd));
3170 mutex_lock(&adev->srbm_mutex);
3171 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3172 gfx_v10_0_gfx_mqd_init(ring);
3173 #ifdef BRING_UP_DEBUG
3174 gfx_v10_0_gfx_queue_init_register(ring);
3176 nv_grbm_select(adev, 0, 0, 0, 0);
3177 mutex_unlock(&adev->srbm_mutex);
3178 if (adev->gfx.me.mqd_backup[mqd_idx])
3179 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3180 } else if (adev->in_gpu_reset) {
3181 /* reset mqd with the backup copy */
3182 if (adev->gfx.me.mqd_backup[mqd_idx])
3183 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3184 /* reset the ring */
3186 adev->wb.wb[ring->wptr_offs] = 0;
3187 amdgpu_ring_clear_ring(ring);
3188 #ifdef BRING_UP_DEBUG
3189 mutex_lock(&adev->srbm_mutex);
3190 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3191 gfx_v10_0_gfx_queue_init_register(ring);
3192 nv_grbm_select(adev, 0, 0, 0, 0);
3193 mutex_unlock(&adev->srbm_mutex);
3196 amdgpu_ring_clear_ring(ring);
3202 #ifndef BRING_UP_DEBUG
3203 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
3205 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3206 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3209 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3212 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3213 adev->gfx.num_gfx_rings);
3215 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3219 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3220 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3222 return amdgpu_ring_test_helper(kiq_ring);
3226 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3229 struct amdgpu_ring *ring;
3231 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3232 ring = &adev->gfx.gfx_ring[i];
3234 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3235 if (unlikely(r != 0))
3238 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3240 r = gfx_v10_0_gfx_init_queue(ring);
3241 amdgpu_bo_kunmap(ring->mqd_obj);
3242 ring->mqd_ptr = NULL;
3244 amdgpu_bo_unreserve(ring->mqd_obj);
3248 #ifndef BRING_UP_DEBUG
3249 r = gfx_v10_0_kiq_enable_kgq(adev);
3253 r = gfx_v10_0_cp_gfx_start(adev);
3257 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3258 ring = &adev->gfx.gfx_ring[i];
3259 ring->sched.ready = true;
3265 static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
3267 struct amdgpu_device *adev = ring->adev;
3269 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3270 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
3271 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3272 ring->has_high_prio = true;
3273 mqd->cp_hqd_queue_priority =
3274 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3276 ring->has_high_prio = false;
3281 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
3283 struct amdgpu_device *adev = ring->adev;
3284 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3285 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3288 mqd->header = 0xC0310800;
3289 mqd->compute_pipelinestat_enable = 0x00000001;
3290 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3291 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3292 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3293 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3294 mqd->compute_misc_reserved = 0x00000003;
3296 eop_base_addr = ring->eop_gpu_addr >> 8;
3297 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3298 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3300 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3301 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3302 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3303 (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
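/* e.g. GFX10_MEC_HPD_SIZE = 2048 bytes = 512 dwords, so
 * EOP_SIZE = order_base_2(512) - 1 = 8 and 2^(8+1) = 512 dwords */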
3305 mqd->cp_hqd_eop_control = tmp;
3307 /* enable doorbell? */
3308 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3310 if (ring->use_doorbell) {
3311 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3312 DOORBELL_OFFSET, ring->doorbell_index);
3313 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3315 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3316 DOORBELL_SOURCE, 0);
3317 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3320 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3324 mqd->cp_hqd_pq_doorbell_control = tmp;
3326 /* disable the queue if it's active */
3328 mqd->cp_hqd_dequeue_request = 0;
3329 mqd->cp_hqd_pq_rptr = 0;
3330 mqd->cp_hqd_pq_wptr_lo = 0;
3331 mqd->cp_hqd_pq_wptr_hi = 0;
3333 /* set the pointer to the MQD */
3334 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3335 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3337 /* set MQD vmid to 0 */
3338 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3339 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3340 mqd->cp_mqd_control = tmp;
3342 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3343 hqd_gpu_addr = ring->gpu_addr >> 8;
3344 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3345 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3347 /* set up the HQD, this is similar to CP_RB0_CNTL */
3348 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3349 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3350 (order_base_2(ring->ring_size / 4) - 1));
3351 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3352 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3354 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3356 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3357 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3358 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3359 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3360 mqd->cp_hqd_pq_control = tmp;
3362 /* set the wb address whether it's enabled or not */
3363 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3364 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3365 mqd->cp_hqd_pq_rptr_report_addr_hi =
3366 upper_32_bits(wb_gpu_addr) & 0xffff;
3368 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3369 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3370 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3371 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3374 /* enable the doorbell if requested */
3375 if (ring->use_doorbell) {
3376 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3377 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3378 DOORBELL_OFFSET, ring->doorbell_index);
3380 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3382 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3383 DOORBELL_SOURCE, 0);
3384 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3388 mqd->cp_hqd_pq_doorbell_control = tmp;
3390 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3392 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3394 /* set the vmid for the queue */
3395 mqd->cp_hqd_vmid = 0;
3397 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3398 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3399 mqd->cp_hqd_persistent_state = tmp;
3401 /* set MIN_IB_AVAIL_SIZE */
3402 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3403 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3404 mqd->cp_hqd_ib_control = tmp;
3406 /* set static priority for a compute queue/ring */
3407 gfx_v10_0_compute_mqd_set_priority(ring, mqd);
3409 /* the map_queues packet doesn't need to activate the queue,
3410 * so only the kiq needs to set this field.
3412 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3413 mqd->cp_hqd_active = 1;
3418 static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
3420 struct amdgpu_device *adev = ring->adev;
3421 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3424 /* disable wptr polling */
3425 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3427 /* write the EOP addr */
3428 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3429 mqd->cp_hqd_eop_base_addr_lo);
3430 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3431 mqd->cp_hqd_eop_base_addr_hi);
3433 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3434 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3435 mqd->cp_hqd_eop_control);
3437 /* enable doorbell? */
3438 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3439 mqd->cp_hqd_pq_doorbell_control);
3441 /* disable the queue if it's active */
3442 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3443 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3444 for (j = 0; j < adev->usec_timeout; j++) {
3445 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3449 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3450 mqd->cp_hqd_dequeue_request);
3451 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3452 mqd->cp_hqd_pq_rptr);
3453 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3454 mqd->cp_hqd_pq_wptr_lo);
3455 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3456 mqd->cp_hqd_pq_wptr_hi);
3459 /* set the pointer to the MQD */
3460 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3461 mqd->cp_mqd_base_addr_lo);
3462 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3463 mqd->cp_mqd_base_addr_hi);
3465 /* set MQD vmid to 0 */
3466 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3467 mqd->cp_mqd_control);
3469 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3470 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3471 mqd->cp_hqd_pq_base_lo);
3472 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3473 mqd->cp_hqd_pq_base_hi);
3475 /* set up the HQD, this is similar to CP_RB0_CNTL */
3476 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3477 mqd->cp_hqd_pq_control);
3479 /* set the wb address whether it's enabled or not */
3480 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3481 mqd->cp_hqd_pq_rptr_report_addr_lo);
3482 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3483 mqd->cp_hqd_pq_rptr_report_addr_hi);
3485 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3486 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3487 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3488 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3489 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3491 /* enable the doorbell if requested */
3492 if (ring->use_doorbell) {
3493 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3494 (adev->doorbell_index.kiq * 2) << 2);
3495 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3496 (adev->doorbell_index.userqueue_end * 2) << 2);
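/* doorbell_index presumably counts 64-bit doorbell slots: * 2
 * converts to a dword index and << 2 to the byte offset taken by
 * the range registers */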
3499 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3500 mqd->cp_hqd_pq_doorbell_control);
3502 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3503 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3504 mqd->cp_hqd_pq_wptr_lo);
3505 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3506 mqd->cp_hqd_pq_wptr_hi);
3508 /* set the vmid for the queue */
3509 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3511 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3512 mqd->cp_hqd_persistent_state);
3514 /* activate the queue */
3515 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3516 mqd->cp_hqd_active);
3518 if (ring->use_doorbell)
3519 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3524 static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3526 struct amdgpu_device *adev = ring->adev;
3527 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3528 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3530 gfx_v10_0_kiq_setting(ring);
3532 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3533 /* reset MQD to a clean status */
3534 if (adev->gfx.mec.mqd_backup[mqd_idx])
3535 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3537 /* reset ring buffer */
3539 amdgpu_ring_clear_ring(ring);
3541 mutex_lock(&adev->srbm_mutex);
3542 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3543 gfx_v10_0_kiq_init_register(ring);
3544 nv_grbm_select(adev, 0, 0, 0, 0);
3545 mutex_unlock(&adev->srbm_mutex);
3547 memset((void *)mqd, 0, sizeof(*mqd));
3548 mutex_lock(&adev->srbm_mutex);
3549 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3550 gfx_v10_0_compute_mqd_init(ring);
3551 gfx_v10_0_kiq_init_register(ring);
3552 nv_grbm_select(adev, 0, 0, 0, 0);
3553 mutex_unlock(&adev->srbm_mutex);
3555 if (adev->gfx.mec.mqd_backup[mqd_idx])
3556 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3562 static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
3564 struct amdgpu_device *adev = ring->adev;
3565 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3566 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3568 if (!adev->in_gpu_reset && !adev->in_suspend) {
3569 memset((void *)mqd, 0, sizeof(*mqd));
3570 mutex_lock(&adev->srbm_mutex);
3571 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3572 gfx_v10_0_compute_mqd_init(ring);
3573 nv_grbm_select(adev, 0, 0, 0, 0);
3574 mutex_unlock(&adev->srbm_mutex);
3576 if (adev->gfx.mec.mqd_backup[mqd_idx])
3577 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3578 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3579 /* reset MQD to a clean status */
3580 if (adev->gfx.mec.mqd_backup[mqd_idx])
3581 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3583 /* reset ring buffer */
3584 ring->wptr = 0;
3585 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3586 amdgpu_ring_clear_ring(ring);
3587 } else {
3588 amdgpu_ring_clear_ring(ring);
3594 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
3596 struct amdgpu_ring *ring;
3599 ring = &adev->gfx.kiq.ring;
3601 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3602 if (unlikely(r != 0))
3605 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3606 if (unlikely(r != 0))
3609 gfx_v10_0_kiq_init_queue(ring);
3610 amdgpu_bo_kunmap(ring->mqd_obj);
3611 ring->mqd_ptr = NULL;
3612 amdgpu_bo_unreserve(ring->mqd_obj);
3613 ring->sched.ready = true;
3617 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
3619 struct amdgpu_ring *ring = NULL;
3622 gfx_v10_0_cp_compute_enable(adev, true);
3624 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3625 ring = &adev->gfx.compute_ring[i];
3627 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3628 if (unlikely(r != 0))
3630 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3632 r = gfx_v10_0_kcq_init_queue(ring);
3633 amdgpu_bo_kunmap(ring->mqd_obj);
3634 ring->mqd_ptr = NULL;
3636 amdgpu_bo_unreserve(ring->mqd_obj);
3641 r = amdgpu_gfx_enable_kcq(adev);
3646 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
3649 struct amdgpu_ring *ring;
3651 if (!(adev->flags & AMD_IS_APU))
3652 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3654 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3655 /* legacy firmware loading */
3656 r = gfx_v10_0_cp_gfx_load_microcode(adev);
3660 r = gfx_v10_0_cp_compute_load_microcode(adev);
3665 r = gfx_v10_0_kiq_resume(adev);
3669 r = gfx_v10_0_kcq_resume(adev);
3673 if (!amdgpu_async_gfx_ring) {
3674 r = gfx_v10_0_cp_gfx_resume(adev);
3678 r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
3683 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3684 ring = &adev->gfx.gfx_ring[i];
3685 r = amdgpu_ring_test_helper(ring);
3690 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3691 ring = &adev->gfx.compute_ring[i];
3692 r = amdgpu_ring_test_helper(ring);
3700 static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
3702 gfx_v10_0_cp_gfx_enable(adev, enable);
3703 gfx_v10_0_cp_compute_enable(adev, enable);
3706 static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
3708 uint32_t data, pattern = 0xDEADBEEF;
3710 /* check if mmVGT_ESGS_RING_SIZE_UMD
3711 * has been remapped to mmVGT_ESGS_RING_SIZE */
3712 data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
3714 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
3716 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
3718 if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
3719 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
3720 return true;
3721 } else {
3722 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
3723 return false;
3727 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
3731 /* initialize cam_index to 0
3732 * index will auto-increment after each data write */
3733 WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
3735 /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
3736 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
3737 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3738 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
3739 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3740 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3741 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3743 /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
3744 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
3745 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3746 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
3747 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3748 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3749 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3751 /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
3752 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
3753 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3754 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
3755 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3756 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3757 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3759 /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
3760 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
3761 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3762 (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
3763 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3764 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3765 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3767 /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
3768 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
3769 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3770 (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
3771 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3772 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3773 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3775 /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
3776 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
3777 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3778 (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
3779 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3780 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3781 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3783 /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
3784 data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
3785 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3786 (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
3787 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3788 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3789 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
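/*
 * Editor's sketch: the six CAM entries programmed above all follow the
 * same UMD-register -> real-register pattern, so they could be written
 * table-driven. The struct and helper below are hypothetical and only
 * illustrate the pairing; they are not part of the driver.
 */
struct grbm_cam_pair {
	u32 umd_reg;	/* offset UMDs are allowed to write */
	u32 real_reg;	/* offset the CAM remaps the write to */
};

static void gfx_v10_write_cam_pair(struct amdgpu_device *adev,
				   const struct grbm_cam_pair *pair)
{
	u32 data = (pair->umd_reg << GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
		   (pair->real_reg << GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);

	/* each entry is an UPPER/DATA pair; the CAM index auto-increments */
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
}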
3792 static int gfx_v10_0_hw_init(void *handle)
3795 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3797 if (!amdgpu_emu_mode)
3798 gfx_v10_0_init_golden_registers(adev);
3800 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3802 * For gfx 10, rlc firmware loading relies on the smu firmware being
3803 * loaded first, so in direct type it has to load the smc ucode
3806 r = smu_load_microcode(&adev->smu);
3810 r = smu_check_fw_status(&adev->smu);
3812 pr_err("SMC firmware status is not correct\n");
3817 /* if GRBM CAM not remapped, set up the remapping */
3818 if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3819 gfx_v10_0_setup_grbm_cam_remapping(adev);
3821 gfx_v10_0_constants_init(adev);
3823 r = gfx_v10_0_rlc_resume(adev);
3828 * init golden registers and rlc resume may override some registers,
3829 * so reconfigure them here
3831 gfx_v10_0_tcp_harvest(adev);
3833 r = gfx_v10_0_cp_resume(adev);
3840 #ifndef BRING_UP_DEBUG
3841 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3843 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3844 struct amdgpu_ring *kiq_ring = &kiq->ring;
3847 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3850 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3851 adev->gfx.num_gfx_rings))
3854 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3855 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3856 PREEMPT_QUEUES, 0, 0);
3858 return amdgpu_ring_test_helper(kiq_ring);
3862 static int gfx_v10_0_hw_fini(void *handle)
3864 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3867 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3868 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3869 #ifndef BRING_UP_DEBUG
3870 if (amdgpu_async_gfx_ring) {
3871 r = gfx_v10_0_kiq_disable_kgq(adev);
3872 if (r)
3873 DRM_ERROR("KGQ disable failed\n");
3876 if (amdgpu_gfx_disable_kcq(adev))
3877 DRM_ERROR("KCQ disable failed\n");
3878 if (amdgpu_sriov_vf(adev)) {
3879 gfx_v10_0_cp_gfx_enable(adev, false);
3880 return 0;
3881 }
3882 gfx_v10_0_cp_enable(adev, false);
3883 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3888 static int gfx_v10_0_suspend(void *handle)
3890 return gfx_v10_0_hw_fini(handle);
3893 static int gfx_v10_0_resume(void *handle)
3895 return gfx_v10_0_hw_init(handle);
3898 static bool gfx_v10_0_is_idle(void *handle)
3900 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3902 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3903 GRBM_STATUS, GUI_ACTIVE))
3909 static int gfx_v10_0_wait_for_idle(void *handle)
3913 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3915 for (i = 0; i < adev->usec_timeout; i++) {
3916 /* read GRBM_STATUS */
3917 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3918 GRBM_STATUS__GUI_ACTIVE_MASK;
3920 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3927 static int gfx_v10_0_soft_reset(void *handle)
3929 u32 grbm_soft_reset = 0;
3931 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3934 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3935 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3936 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3937 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
3938 GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
3939 GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
3940 | GRBM_STATUS__BCI_BUSY_MASK)) {
3941 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3942 GRBM_SOFT_RESET, SOFT_RESET_CP,
3944 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3945 GRBM_SOFT_RESET, SOFT_RESET_GFX,
3949 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3950 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3951 GRBM_SOFT_RESET, SOFT_RESET_CP,
3956 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3957 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3958 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3959 GRBM_SOFT_RESET, SOFT_RESET_RLC,
3962 if (grbm_soft_reset) {
3964 gfx_v10_0_rlc_stop(adev);
3966 /* Disable GFX parsing/prefetching */
3967 gfx_v10_0_cp_gfx_enable(adev, false);
3969 /* Disable MEC parsing/prefetching */
3970 gfx_v10_0_cp_compute_enable(adev, false);
3972 if (grbm_soft_reset) {
3973 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3974 tmp |= grbm_soft_reset;
3975 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3976 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3977 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3981 tmp &= ~grbm_soft_reset;
3982 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3983 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3986 /* Wait a little for things to settle down */
3992 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3996 amdgpu_gfx_off_ctrl(adev, false);
3997 mutex_lock(&adev->gfx.gpu_clock_mutex);
3998 clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
3999 ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
4000 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4001 amdgpu_gfx_off_ctrl(adev, true);
4003 return clock;
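/*
 * Note on the function above (editor's comment): the 64-bit TSC value is
 * composed from two 32-bit SMUIO register reads, with GFXOFF disabled
 * around the access so the register block stays reachable, and the mutex
 * serializing concurrent readers.
 */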
4005 static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4007 uint32_t gds_base, uint32_t gds_size,
4008 uint32_t gws_base, uint32_t gws_size,
4009 uint32_t oa_base, uint32_t oa_size)
4011 struct amdgpu_device *adev = ring->adev;
4014 gfx_v10_0_write_data_to_reg(ring, 0, false,
4015 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4019 gfx_v10_0_write_data_to_reg(ring, 0, false,
4020 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4024 gfx_v10_0_write_data_to_reg(ring, 0, false,
4025 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4026 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4029 gfx_v10_0_write_data_to_reg(ring, 0, false,
4030 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4031 (1 << (oa_size + oa_base)) - (1 << oa_base));
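/*
 * Editor's sketch: the OA expression above builds a contiguous mask of
 * oa_size bits starting at bit oa_base. Hypothetical helper for clarity:
 */
static inline u32 gfx_v10_gds_oa_mask(u32 oa_base, u32 oa_size)
{
	/* e.g. oa_base = 2, oa_size = 2: (1 << 4) - (1 << 2) = 0b1100 */
	return (1u << (oa_size + oa_base)) - (1u << oa_base);
}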
4034 static int gfx_v10_0_early_init(void *handle)
4036 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4038 adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
4040 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4042 gfx_v10_0_set_kiq_pm4_funcs(adev);
4043 gfx_v10_0_set_ring_funcs(adev);
4044 gfx_v10_0_set_irq_funcs(adev);
4045 gfx_v10_0_set_gds_init(adev);
4046 gfx_v10_0_set_rlc_funcs(adev);
4051 static int gfx_v10_0_late_init(void *handle)
4053 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4056 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4060 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4067 static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
4071 /* if RLC is not enabled, do nothing */
4072 rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4073 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4076 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
4081 data = RLC_SAFE_MODE__CMD_MASK;
4082 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4083 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4085 /* wait for RLC_SAFE_MODE */
4086 for (i = 0; i < adev->usec_timeout; i++) {
4087 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4093 static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
4097 data = RLC_SAFE_MODE__CMD_MASK;
4098 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4101 static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4106 /* It is disabled by HW by default */
4107 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4108 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4109 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4110 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4111 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4112 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4114 /* only for Vega10 & Raven1 */
4115 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4118 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4120 /* MGLS is a global flag to control all MGLS in GFX */
4121 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4122 /* 2 - RLC memory Light sleep */
4123 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4124 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4125 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4127 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4129 /* 3 - CP memory Light sleep */
4130 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4131 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4132 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4134 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4138 /* 1 - MGCG_OVERRIDE */
4139 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4140 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4141 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4142 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4143 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4145 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4147 /* 2 - disable MGLS in RLC */
4148 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4149 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4150 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4151 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4154 /* 3 - disable MGLS in CP */
4155 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4156 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4157 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4158 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4163 static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
4168 /* Enable 3D CGCG/CGLS */
4169 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4170 /* write cmd to clear cgcg/cgls ov */
4171 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4172 /* unset CGCG override */
4173 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4174 /* update CGCG and CGLS override bits */
4176 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4177 /* enable 3Dcgcg FSM(0x0000363f) */
4178 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4179 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4180 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4181 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4182 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4183 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4185 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4187 /* set IDLE_POLL_COUNT(0x00900100) */
4188 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4189 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4190 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4192 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4194 /* Disable CGCG/CGLS */
4195 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4196 /* disable cgcg, cgls should be disabled */
4197 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4198 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4199 /* disable cgcg and cgls in FSM */
4201 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4205 static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4210 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4211 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4212 /* unset CGCG override */
4213 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4214 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4215 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4217 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4218 /* update CGCG and CGLS override bits */
4220 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4222 /* enable cgcg FSM(0x0000363F) */
4223 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4224 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4225 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4226 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4227 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4228 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4230 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4232 /* set IDLE_POLL_COUNT(0x00900100) */
4233 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4234 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4235 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4237 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4239 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4240 /* reset CGCG/CGLS bits */
4241 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4242 /* disable cgcg and cgls in FSM */
4244 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4248 static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4251 amdgpu_gfx_rlc_enter_safe_mode(adev);
4254 /* CGCG/CGLS should be enabled after MGCG/MGLS
4255 * === MGCG + MGLS ===
4257 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4258 /* === CGCG /CGLS for GFX 3D Only === */
4259 gfx_v10_0_update_3d_clock_gating(adev, enable);
4260 /* === CGCG + CGLS === */
4261 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4263 /* CGCG/CGLS should be disabled before MGCG/MGLS
4264 * === CGCG + CGLS ===
4266 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4267 /* === CGCG /CGLS for GFX 3D Only === */
4268 gfx_v10_0_update_3d_clock_gating(adev, enable);
4269 /* === MGCG + MGLS === */
4270 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4273 if (adev->cg_flags &
4274 (AMD_CG_SUPPORT_GFX_MGCG |
4275 AMD_CG_SUPPORT_GFX_CGLS |
4276 AMD_CG_SUPPORT_GFX_CGCG |
4278 AMD_CG_SUPPORT_GFX_3D_CGCG |
4279 AMD_CG_SUPPORT_GFX_3D_CGLS))
4280 gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
4282 amdgpu_gfx_rlc_exit_safe_mode(adev);
4287 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4291 data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
4293 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4294 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4296 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
4299 static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
4301 struct soc15_reg_rlcg *entries, int arr_size)
4309 for (i = 0; i < arr_size; i++) {
4310 const struct soc15_reg_rlcg *entry;
4312 entry = &entries[i];
4313 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
4314 if (offset == reg)
4315 return true;
4318 return false;
4321 static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
4323 return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
4326 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
4327 .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
4328 .set_safe_mode = gfx_v10_0_set_safe_mode,
4329 .unset_safe_mode = gfx_v10_0_unset_safe_mode,
4330 .init = gfx_v10_0_rlc_init,
4331 .get_csb_size = gfx_v10_0_get_csb_size,
4332 .get_csb_buffer = gfx_v10_0_get_csb_buffer,
4333 .resume = gfx_v10_0_rlc_resume,
4334 .stop = gfx_v10_0_rlc_stop,
4335 .reset = gfx_v10_0_rlc_reset,
4336 .start = gfx_v10_0_rlc_start,
4337 .update_spm_vmid = gfx_v10_0_update_spm_vmid,
4338 .rlcg_wreg = gfx_v10_rlcg_wreg,
4339 .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
4342 static int gfx_v10_0_set_powergating_state(void *handle,
4343 enum amd_powergating_state state)
4345 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4346 bool enable = (state == AMD_PG_STATE_GATE);
4347 switch (adev->asic_type) {
4351 amdgpu_gfx_off_ctrl(adev, false);
4352 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4354 amdgpu_gfx_off_ctrl(adev, true);
4362 static int gfx_v10_0_set_clockgating_state(void *handle,
4363 enum amd_clockgating_state state)
4365 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4367 switch (adev->asic_type) {
4371 gfx_v10_0_update_gfx_clock_gating(adev,
4372 state == AMD_CG_STATE_GATE);
4380 static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
4382 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4385 /* AMD_CG_SUPPORT_GFX_MGCG */
4386 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4387 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4388 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4390 /* AMD_CG_SUPPORT_GFX_CGCG */
4391 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4392 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4393 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4395 /* AMD_CG_SUPPORT_GFX_CGLS */
4396 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4397 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4399 /* AMD_CG_SUPPORT_GFX_RLC_LS */
4400 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4401 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4402 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4404 /* AMD_CG_SUPPORT_GFX_CP_LS */
4405 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4406 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4407 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4409 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4410 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4411 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4412 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4414 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4415 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4416 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4419 static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4421 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 uses a 32-bit rptr */
4424 static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4426 struct amdgpu_device *adev = ring->adev;
4429 /* XXX check if swapping is necessary on BE */
4430 if (ring->use_doorbell) {
4431 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4433 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4434 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4440 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4442 struct amdgpu_device *adev = ring->adev;
4444 if (ring->use_doorbell) {
4445 /* XXX check if swapping is necessary on BE */
4446 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4447 WDOORBELL64(ring->doorbell_index, ring->wptr);
4449 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4450 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4454 static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4456 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware uses a 32-bit rptr */
4459 static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4463 /* XXX check if swapping is necessary on BE */
4464 if (ring->use_doorbell)
4465 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4471 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4473 struct amdgpu_device *adev = ring->adev;
4475 /* XXX check if swapping is necessary on BE */
4476 if (ring->use_doorbell) {
4477 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4478 WDOORBELL64(ring->doorbell_index, ring->wptr);
4480 BUG(); /* only DOORBELL method supported on gfx10 now */
4484 static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4486 struct amdgpu_device *adev = ring->adev;
4487 u32 ref_and_mask, reg_mem_engine;
4488 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4490 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4493 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4496 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4503 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4504 reg_mem_engine = 1; /* pfp */
4507 gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4508 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4509 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4510 ref_and_mask, ref_and_mask, 0x20);
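/*
 * Note on the function above (editor's comment): each CP engine owns its
 * own bit in the NBIO HDP-flush request/done registers, so compute rings
 * shift a per-MEC base mask (cp2 for MEC1, cp6 for MEC2, apparently one
 * bit per pipe), while the gfx ring uses the fixed cp0 bit and waits on
 * the PFP engine.
 */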
4513 static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4514 struct amdgpu_job *job,
4515 struct amdgpu_ib *ib,
4518 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4519 u32 header, control = 0;
4521 if (ib->flags & AMDGPU_IB_FLAG_CE)
4522 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4524 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4526 control |= ib->length_dw | (vmid << 24);
4528 if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4529 control |= INDIRECT_BUFFER_PRE_ENB(1);
4531 if (flags & AMDGPU_IB_PREEMPTED)
4532 control |= INDIRECT_BUFFER_PRE_RESUME(1);
4534 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
4535 gfx_v10_0_ring_emit_de_meta(ring,
4536 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
4539 amdgpu_ring_write(ring, header);
4540 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4541 amdgpu_ring_write(ring,
4545 lower_32_bits(ib->gpu_addr));
4546 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4547 amdgpu_ring_write(ring, control);
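/*
 * Note on the function above (editor's comment): the emitted
 * INDIRECT_BUFFER packet is four dwords - the header, the IB GPU address
 * low half (must be 4-byte aligned), the address high half, and a control
 * word packing length_dw, vmid << 24 and the preemption bits set earlier.
 */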
4550 static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4551 struct amdgpu_job *job,
4552 struct amdgpu_ib *ib,
4555 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4556 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4558 /* Currently there is a high likelihood of getting a wave ID mismatch
4559 * between the ME and GDS, leading to a hw deadlock, because the ME
4560 * generates different wave IDs than the GDS expects. This situation
4561 * happens randomly when at least 5 compute pipes use GDS ordered append.
4562 * The wave IDs generated by the ME are also wrong after suspend/resume.
4563 * Those are probably bugs somewhere else in the kernel driver.
4565 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4566 * GDS to 0 for this ring (me/pipe).
4568 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4569 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4570 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4571 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4574 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4575 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4576 amdgpu_ring_write(ring,
4580 lower_32_bits(ib->gpu_addr));
4581 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4582 amdgpu_ring_write(ring, control);
4585 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4586 u64 seq, unsigned flags)
4588 struct amdgpu_device *adev = ring->adev;
4589 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4590 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4592 /* Interrupts don't work correctly on the GFX10.1 model yet; use the fallback path instead */
4593 if (adev->pdev->device == 0x50)
4596 /* RELEASE_MEM - flush caches, send int */
4597 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4598 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4599 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4600 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
4601 PACKET3_RELEASE_MEM_GCR_GLM_WB |
4602 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4603 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4604 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
4605 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4606 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4609 * the address should be qword-aligned for a 64-bit write, and dword-
4610 * aligned when only the low 32 bits of data are sent (high half discarded)
4616 amdgpu_ring_write(ring, lower_32_bits(addr));
4617 amdgpu_ring_write(ring, upper_32_bits(addr));
4618 amdgpu_ring_write(ring, lower_32_bits(seq));
4619 amdgpu_ring_write(ring, upper_32_bits(seq));
4620 amdgpu_ring_write(ring, 0);
4623 static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4625 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4626 uint32_t seq = ring->fence_drv.sync_seq;
4627 uint64_t addr = ring->fence_drv.gpu_addr;
4629 gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4630 upper_32_bits(addr), seq, 0xffffffff, 4);
4633 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4634 unsigned vmid, uint64_t pd_addr)
4636 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4638 /* compute doesn't have PFP */
4639 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4640 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4641 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4642 amdgpu_ring_write(ring, 0x0);
4646 static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4647 u64 seq, unsigned int flags)
4649 struct amdgpu_device *adev = ring->adev;
4651 /* we only allocate 32bit for each seq wb address */
4652 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4654 /* write fence seq to the "addr" */
4655 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4656 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4657 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4658 amdgpu_ring_write(ring, lower_32_bits(addr));
4659 amdgpu_ring_write(ring, upper_32_bits(addr));
4660 amdgpu_ring_write(ring, lower_32_bits(seq));
4662 if (flags & AMDGPU_FENCE_FLAG_INT) {
4663 /* set register to trigger INT */
4664 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4665 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4666 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4667 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4668 amdgpu_ring_write(ring, 0);
4669 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4673 static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
4675 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4676 amdgpu_ring_write(ring, 0);
4679 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4683 if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
4684 gfx_v10_0_ring_emit_ce_meta(ring,
4685 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
4687 gfx_v10_0_ring_emit_tmz(ring, true);
4689 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
4690 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4691 /* set load_global_config & load_global_uconfig */
4693 /* set load_cs_sh_regs */
4695 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4698 /* set load_ce_ram if a preamble is present */
4699 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4702 /* still set load_ce_ram if this is the first time a preamble is present,
4703 * even though no context switch happens.
4705 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4709 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4710 amdgpu_ring_write(ring, dw2);
4711 amdgpu_ring_write(ring, 0);
4714 static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4718 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4719 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4720 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4721 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4722 ret = ring->wptr & ring->buf_mask;
4723 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4728 static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4731 BUG_ON(offset > ring->buf_mask);
4732 BUG_ON(ring->ring[offset] != 0x55aa55aa);
4734 cur = (ring->wptr - 1) & ring->buf_mask;
4735 if (likely(cur > offset))
4736 ring->ring[offset] = cur - offset;
4738 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
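/*
 * Worked example for the patch logic above (editor's sketch): with
 * buf_mask = 0xff, a patch slot at offset 0xfe and wptr now at 0x05,
 * cur = 0x04 < offset, so the value written is
 * (0xff + 1) - 0xfe + 0x04 = 6 dwords, correctly spanning the wrap of
 * the ring buffer.
 */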
4741 static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
4744 struct amdgpu_device *adev = ring->adev;
4745 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4746 struct amdgpu_ring *kiq_ring = &kiq->ring;
4748 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4751 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
4754 /* assert preemption condition */
4755 amdgpu_ring_set_preempt_cond_exec(ring, false);
4757 /* assert IB preemption, emit the trailing fence */
4758 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4759 ring->trail_fence_gpu_addr,
4761 amdgpu_ring_commit(kiq_ring);
4763 /* poll the trailing fence */
4764 for (i = 0; i < adev->usec_timeout; i++) {
4765 if (ring->trail_seq ==
4766 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4771 if (i >= adev->usec_timeout) {
4773 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4776 /* deassert preemption condition */
4777 amdgpu_ring_set_preempt_cond_exec(ring, true);
4781 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
4783 struct amdgpu_device *adev = ring->adev;
4784 struct v10_ce_ib_state ce_payload = {0};
4788 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4789 csa_addr = amdgpu_csa_vaddr(ring->adev);
4791 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4792 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4793 WRITE_DATA_DST_SEL(8) |
4795 WRITE_DATA_CACHE_POLICY(0));
4796 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4797 offsetof(struct v10_gfx_meta_data, ce_payload)));
4798 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4799 offsetof(struct v10_gfx_meta_data, ce_payload)));
4802 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4803 offsetof(struct v10_gfx_meta_data,
4805 sizeof(ce_payload) >> 2);
4807 amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
4808 sizeof(ce_payload) >> 2);
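/*
 * Note on the count above (editor's comment): a WRITE_DATA body is one
 * control word plus two address dwords plus the payload, and the PACKET3
 * count field encodes the body size minus one - hence
 * (sizeof(ce_payload) >> 2) + 4 - 2.
 */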
4811 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
4813 struct amdgpu_device *adev = ring->adev;
4814 struct v10_de_ib_state de_payload = {0};
4815 uint64_t csa_addr, gds_addr;
4818 csa_addr = amdgpu_csa_vaddr(ring->adev);
4819 gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
4821 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4822 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4824 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4825 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4826 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4827 WRITE_DATA_DST_SEL(8) |
4829 WRITE_DATA_CACHE_POLICY(0));
4830 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4831 offsetof(struct v10_gfx_meta_data, de_payload)));
4832 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4833 offsetof(struct v10_gfx_meta_data, de_payload)));
4836 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4837 offsetof(struct v10_gfx_meta_data,
4839 sizeof(de_payload) >> 2);
4841 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
4842 sizeof(de_payload) >> 2);
4845 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4847 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4848 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
4851 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4853 struct amdgpu_device *adev = ring->adev;
4854 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4856 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4857 amdgpu_ring_write(ring, 0 | /* src: register*/
4858 (5 << 8) | /* dst: memory */
4859 (1 << 20)); /* write confirm */
4860 amdgpu_ring_write(ring, reg);
4861 amdgpu_ring_write(ring, 0);
4862 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4863 kiq->reg_val_offs * 4));
4864 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4865 kiq->reg_val_offs * 4));
4868 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4873 switch (ring->funcs->type) {
4874 case AMDGPU_RING_TYPE_GFX:
4875 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4877 case AMDGPU_RING_TYPE_KIQ:
4878 cmd = (1 << 16); /* no inc addr */
4884 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4885 amdgpu_ring_write(ring, cmd);
4886 amdgpu_ring_write(ring, reg);
4887 amdgpu_ring_write(ring, 0);
4888 amdgpu_ring_write(ring, val);
4891 static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4892 uint32_t val, uint32_t mask)
4894 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4897 static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4898 uint32_t reg0, uint32_t reg1,
4899 uint32_t ref, uint32_t mask)
4901 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4902 struct amdgpu_device *adev = ring->adev;
4903 bool fw_version_ok = false;
4905 fw_version_ok = adev->gfx.cp_fw_write_wait;
4908 gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4911 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4916 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4917 uint32_t me, uint32_t pipe,
4918 enum amdgpu_interrupt_state state)
4920 uint32_t cp_int_cntl, cp_int_cntl_reg;
4925 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
4928 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
4931 DRM_DEBUG("invalid pipe %d\n", pipe);
4935 DRM_DEBUG("invalid me %d\n", me);
4940 case AMDGPU_IRQ_STATE_DISABLE:
4941 cp_int_cntl = RREG32(cp_int_cntl_reg);
4942 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4943 TIME_STAMP_INT_ENABLE, 0);
4944 WREG32(cp_int_cntl_reg, cp_int_cntl);
4946 case AMDGPU_IRQ_STATE_ENABLE:
4947 cp_int_cntl = RREG32(cp_int_cntl_reg);
4948 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4949 TIME_STAMP_INT_ENABLE, 1);
4950 WREG32(cp_int_cntl_reg, cp_int_cntl);
4957 static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4959 enum amdgpu_interrupt_state state)
4961 u32 mec_int_cntl, mec_int_cntl_reg;
4964 * amdgpu controls only the first MEC. That's why this function only
4965 * handles the setting of interrupts for this specific MEC. All other
4966 * pipes' interrupts are set by amdkfd.
4972 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4975 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4978 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4981 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4984 DRM_DEBUG("invalid pipe %d\n", pipe);
4988 DRM_DEBUG("invalid me %d\n", me);
4993 case AMDGPU_IRQ_STATE_DISABLE:
4994 mec_int_cntl = RREG32(mec_int_cntl_reg);
4995 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4996 TIME_STAMP_INT_ENABLE, 0);
4997 WREG32(mec_int_cntl_reg, mec_int_cntl);
4999 case AMDGPU_IRQ_STATE_ENABLE:
5000 mec_int_cntl = RREG32(mec_int_cntl_reg);
5001 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5002 TIME_STAMP_INT_ENABLE, 1);
5003 WREG32(mec_int_cntl_reg, mec_int_cntl);
5010 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5011 struct amdgpu_irq_src *src,
5013 enum amdgpu_interrupt_state state)
5016 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5017 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
5019 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
5020 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
5022 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5023 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5025 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5026 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5028 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5029 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5031 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5032 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5034 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5035 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5037 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5038 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5040 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5041 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5043 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5044 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5052 static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
5053 struct amdgpu_irq_src *source,
5054 struct amdgpu_iv_entry *entry)
5057 u8 me_id, pipe_id, queue_id;
5058 struct amdgpu_ring *ring;
5060 DRM_DEBUG("IH: CP EOP\n");
5061 me_id = (entry->ring_id & 0x0c) >> 2;
5062 pipe_id = (entry->ring_id & 0x03) >> 0;
5063 queue_id = (entry->ring_id & 0x70) >> 4;
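/* Worked example for the decode above (editor's note): ring_id = 0x16
 * gives me_id = (0x16 & 0x0c) >> 2 = 1, pipe_id = 0x16 & 0x03 = 2,
 * queue_id = (0x16 & 0x70) >> 4 = 1.
 */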
5068 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5070 amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
5074 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5075 ring = &adev->gfx.compute_ring[i];
5076 /* Per-queue interrupt is supported for MEC starting from VI.
5077 * The interrupt can only be enabled/disabled per pipe instead of per queue.
5079 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5080 amdgpu_fence_process(ring);
5087 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5088 struct amdgpu_irq_src *source,
5090 enum amdgpu_interrupt_state state)
5093 case AMDGPU_IRQ_STATE_DISABLE:
5094 case AMDGPU_IRQ_STATE_ENABLE:
5095 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5096 PRIV_REG_INT_ENABLE,
5097 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5106 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5107 struct amdgpu_irq_src *source,
5109 enum amdgpu_interrupt_state state)
5112 case AMDGPU_IRQ_STATE_DISABLE:
5113 case AMDGPU_IRQ_STATE_ENABLE:
5114 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5115 PRIV_INSTR_INT_ENABLE,
5116 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5124 static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
5125 struct amdgpu_iv_entry *entry)
5127 u8 me_id, pipe_id, queue_id;
5128 struct amdgpu_ring *ring;
5131 me_id = (entry->ring_id & 0x0c) >> 2;
5132 pipe_id = (entry->ring_id & 0x03) >> 0;
5133 queue_id = (entry->ring_id & 0x70) >> 4;
5137 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5138 ring = &adev->gfx.gfx_ring[i];
5139 /* we only enabled 1 gfx queue per pipe for now */
5140 if (ring->me == me_id && ring->pipe == pipe_id)
5141 drm_sched_fault(&ring->sched);
5146 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5147 ring = &adev->gfx.compute_ring[i];
5148 if (ring->me == me_id && ring->pipe == pipe_id &&
5149 ring->queue == queue_id)
5150 drm_sched_fault(&ring->sched);
5158 static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
5159 struct amdgpu_irq_src *source,
5160 struct amdgpu_iv_entry *entry)
5162 DRM_ERROR("Illegal register access in command stream\n");
5163 gfx_v10_0_handle_priv_fault(adev, entry);
5167 static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
5168 struct amdgpu_irq_src *source,
5169 struct amdgpu_iv_entry *entry)
5171 DRM_ERROR("Illegal instruction in command stream\n");
5172 gfx_v10_0_handle_priv_fault(adev, entry);
5176 static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
5177 struct amdgpu_irq_src *src,
5179 enum amdgpu_interrupt_state state)
5181 uint32_t tmp, target;
5182 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5185 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5187 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
5188 target += ring->pipe;
5191 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
5192 if (state == AMDGPU_IRQ_STATE_DISABLE) {
5193 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5194 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5195 GENERIC2_INT_ENABLE, 0);
5196 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5198 tmp = RREG32(target);
5199 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5200 GENERIC2_INT_ENABLE, 0);
5201 WREG32(target, tmp);
5203 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5204 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5205 GENERIC2_INT_ENABLE, 1);
5206 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5208 tmp = RREG32(target);
5209 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5210 GENERIC2_INT_ENABLE, 1);
5211 WREG32(target, tmp);
5215 BUG(); /* kiq only support GENERIC2_INT now */
5221 static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
5222 struct amdgpu_irq_src *source,
5223 struct amdgpu_iv_entry *entry)
5225 u8 me_id, pipe_id, queue_id;
5226 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5228 me_id = (entry->ring_id & 0x0c) >> 2;
5229 pipe_id = (entry->ring_id & 0x03) >> 0;
5230 queue_id = (entry->ring_id & 0x70) >> 4;
5231 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
5232 me_id, pipe_id, queue_id);
5234 amdgpu_fence_process(ring);
5238 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
5239 .name = "gfx_v10_0",
5240 .early_init = gfx_v10_0_early_init,
5241 .late_init = gfx_v10_0_late_init,
5242 .sw_init = gfx_v10_0_sw_init,
5243 .sw_fini = gfx_v10_0_sw_fini,
5244 .hw_init = gfx_v10_0_hw_init,
5245 .hw_fini = gfx_v10_0_hw_fini,
5246 .suspend = gfx_v10_0_suspend,
5247 .resume = gfx_v10_0_resume,
5248 .is_idle = gfx_v10_0_is_idle,
5249 .wait_for_idle = gfx_v10_0_wait_for_idle,
5250 .soft_reset = gfx_v10_0_soft_reset,
5251 .set_clockgating_state = gfx_v10_0_set_clockgating_state,
5252 .set_powergating_state = gfx_v10_0_set_powergating_state,
5253 .get_clockgating_state = gfx_v10_0_get_clockgating_state,
5256 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5257 .type = AMDGPU_RING_TYPE_GFX,
5259 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5260 .support_64bit_ptrs = true,
5261 .vmhub = AMDGPU_GFXHUB_0,
5262 .get_rptr = gfx_v10_0_ring_get_rptr_gfx,
5263 .get_wptr = gfx_v10_0_ring_get_wptr_gfx,
5264 .set_wptr = gfx_v10_0_ring_set_wptr_gfx,
5265 .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
5267 7 + /* PIPELINE_SYNC */
5268 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5269 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5271 8 + /* FENCE for VM_FLUSH */
5272 20 + /* GDS switch */
5273 4 + /* double SWITCH_BUFFER,
5274 * the first COND_EXEC jump to the place
5275 * just prior to this double SWITCH_BUFFER
5284 8 + 8 + /* FENCE x2 */
5285 2, /* SWITCH_BUFFER */
5286 .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
5287 .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
5288 .emit_fence = gfx_v10_0_ring_emit_fence,
5289 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5290 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5291 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5292 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5293 .test_ring = gfx_v10_0_ring_test_ring,
5294 .test_ib = gfx_v10_0_ring_test_ib,
5295 .insert_nop = amdgpu_ring_insert_nop,
5296 .pad_ib = amdgpu_ring_generic_pad_ib,
5297 .emit_switch_buffer = gfx_v10_0_ring_emit_sb,
5298 .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
5299 .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
5300 .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
5301 .preempt_ib = gfx_v10_0_ring_preempt_ib,
5302 .emit_tmz = gfx_v10_0_ring_emit_tmz,
5303 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5304 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5305 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5308 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5309 .type = AMDGPU_RING_TYPE_COMPUTE,
5311 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5312 .support_64bit_ptrs = true,
5313 .vmhub = AMDGPU_GFXHUB_0,
5314 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5315 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5316 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5318 20 + /* gfx_v10_0_ring_emit_gds_switch */
5319 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5320 5 + /* hdp invalidate */
5321 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5322 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5323 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5324 2 + /* gfx_v10_0_ring_emit_vm_flush */
5325 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
5326 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5327 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5328 .emit_fence = gfx_v10_0_ring_emit_fence,
5329 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5330 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5331 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5332 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5333 .test_ring = gfx_v10_0_ring_test_ring,
5334 .test_ib = gfx_v10_0_ring_test_ib,
5335 .insert_nop = amdgpu_ring_insert_nop,
5336 .pad_ib = amdgpu_ring_generic_pad_ib,
5337 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5338 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5339 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5342 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5343 .type = AMDGPU_RING_TYPE_KIQ,
5345 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5346 .support_64bit_ptrs = true,
5347 .vmhub = AMDGPU_GFXHUB_0,
5348 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5349 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5350 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5352 20 + /* gfx_v10_0_ring_emit_gds_switch */
5353 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5354 5 + /* hdp invalidate */
5355 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5356 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5357 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5358 2 + /* gfx_v10_0_ring_emit_vm_flush */
5359 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5360 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5361 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5362 .emit_fence = gfx_v10_0_ring_emit_fence_kiq,
5363 .test_ring = gfx_v10_0_ring_test_ring,
5364 .test_ib = gfx_v10_0_ring_test_ib,
5365 .insert_nop = amdgpu_ring_insert_nop,
5366 .pad_ib = amdgpu_ring_generic_pad_ib,
5367 .emit_rreg = gfx_v10_0_ring_emit_rreg,
5368 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5369 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5370 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5373 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
5377 adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
5379 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5380 adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
5382 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5383 adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
5386 static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
5387 .set = gfx_v10_0_set_eop_interrupt_state,
5388 .process = gfx_v10_0_eop_irq,
5391 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
5392 .set = gfx_v10_0_set_priv_reg_fault_state,
5393 .process = gfx_v10_0_priv_reg_irq,
5396 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
5397 .set = gfx_v10_0_set_priv_inst_fault_state,
5398 .process = gfx_v10_0_priv_inst_irq,
5401 static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
5402 .set = gfx_v10_0_kiq_set_interrupt_state,
5403 .process = gfx_v10_0_kiq_irq,
5406 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
5408 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5409 adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
5411 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
5412 adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
5414 adev->gfx.priv_reg_irq.num_types = 1;
5415 adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
5417 adev->gfx.priv_inst_irq.num_types = 1;
5418 adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
5421 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
5423 switch (adev->asic_type) {
5427 adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
5434 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5436 unsigned total_cu = adev->gfx.config.max_cu_per_sh *
5437 adev->gfx.config.max_sh_per_se *
5438 adev->gfx.config.max_shader_engines;
5440 adev->gds.gds_size = 0x10000;
5441 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
5442 adev->gds.gws_size = 64;
5443 adev->gds.oa_size = 16;
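/*
 * Worked example (editor's sketch, hypothetical config): with
 * max_cu_per_sh = 10, max_sh_per_se = 2 and max_shader_engines = 2,
 * total_cu = 40 and gds_compute_max_wave_id = 40 * 32 - 1 = 1279,
 * i.e. 32 wave ID slots per CU.
 */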
5446 static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5454 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5455 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5457 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5460 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5462 u32 data, wgp_bitmask;
5463 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5464 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5466 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5467 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5470 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5472 return (~data) & wgp_bitmask;
5475 static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5477 u32 wgp_idx, wgp_active_bitmap;
5478 u32 cu_bitmap_per_wgp, cu_active_bitmap;
5480 wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
5481 cu_active_bitmap = 0;
5483 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
5484 /* if there is one WGP enabled, it means 2 CUs will be enabled */
5485 cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5486 if (wgp_active_bitmap & (1 << wgp_idx))
5487 cu_active_bitmap |= cu_bitmap_per_wgp;
5490 return cu_active_bitmap;
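/*
 * Worked example (editor's note): wgp_active_bitmap = 0b0101 expands to
 * cu_active_bitmap = 0b00110011 - each active WGP contributes the pair
 * of adjacent CU bits 3 << (2 * wgp_idx).
 */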
5493 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
5494 struct amdgpu_cu_info *cu_info)
5496 int i, j, k, counter, active_cu_number = 0;
5497 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5498 unsigned disable_masks[4 * 2];
5500 if (!adev || !cu_info)
5503 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5505 mutex_lock(&adev->grbm_idx_mutex);
5506 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5507 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5511 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
5513 gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
5514 adev, disable_masks[i * 2 + j]);
5515 bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
5516 cu_info->bitmap[i][j] = bitmap;
5518 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5519 if (bitmap & mask) {
5520 if (counter < adev->gfx.config.max_cu_per_sh)
5526 active_cu_number += counter;
5528 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5529 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5532 gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5533 mutex_unlock(&adev->grbm_idx_mutex);
5535 cu_info->number = active_cu_number;
5536 cu_info->ao_cu_mask = ao_cu_mask;
5537 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5542 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
5544 .type = AMD_IP_BLOCK_TYPE_GFX,
5548 .funcs = &gfx_v10_0_ip_funcs,