/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_NUM_COMPUTE_RINGS 8
#define RLCG_UCODE_LOADING_START_ADDRESS 0x2000
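
/*
 * Firmware images the driver may request at runtime.  MODULE_FIRMWARE()
 * only records the file names in the module metadata so that userspace
 * tooling (e.g. initramfs generators) knows to bundle them; the actual
 * loading happens in gfx_v9_0_init_microcode() below.
 */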
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};
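
/*
 * "Golden" register settings.  Each entry is a {register offset, AND mask,
 * OR value} triple consumed by amdgpu_program_register_sequence(), which
 * essentially does tmp = (RREG32(reg) & ~and_mask) | or_value, so only the
 * bits covered by the mask are overridden with validated defaults.
 */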
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00ffeff, 0x00000400,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff
};

static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x00000007
};

static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff
};

static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x26013042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x26013042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x26013042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
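
/*
 * Emit a PM4 WRITE_DATA packet that makes the selected CP engine write
 * 'val' to register 'reg'.  The packet body is: header, control dword
 * (engine select, destination select, optional write confirmation),
 * destination address lo/hi, then the data itself.
 */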
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
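
/*
 * Emit a PM4 WAIT_REG_MEM packet: the CP stalls until the dword at a
 * register (mem_space == 0) or memory location (mem_space == 1), masked
 * with 'mask', equals 'ref' (function 3 == equal), re-polling every
 * 'inv' interval.  Memory addresses must be dword aligned.
 */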
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
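
/*
 * Basic ring sanity test: write a token to a scratch register through
 * the ring (SET_UCONFIG_REG packet) and poll until the CP has executed
 * the write.  0xCAFEDEAD marks "not yet written", 0xDEADBEEF "done".
 */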
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
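
/*
 * Same smoke test as above, but the scratch write is placed in an
 * indirect buffer (IB) so that IB submission and fence signalling are
 * exercised as well.
 */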
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
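
/*
 * Fetch and validate all gfx microcode images (PFP, ME, CE, RLC, MEC and
 * the optional MEC2), cache their version numbers, and, when the PSP is
 * doing the loading, register each image in adev->firmware.ucode[].  The
 * MEC image is split into the firmware proper and its jump table (JT),
 * which are tracked as separate ucode entries.
 */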
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
		le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
		le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
	if (adev->gfx.mec.mec_fw_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

		amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
		adev->gfx.mec.mec_fw_obj = NULL;
	}
}
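
/*
 * Each compute queue owns a MEC_HPD_SIZE (2 KiB) slice of the HPD EOP
 * buffer allocated below, which the MEC firmware uses as per-queue
 * end-of-pipe storage; the buffer is therefore sized
 * num_queue * MEC_HPD_SIZE.
 */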
#define MEC_HPD_SIZE 2048

static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	/*
	 * we assign only 1 pipe because all other pipes will
	 * be handled by KFD
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_queue * MEC_HPD_SIZE,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	if (adev->gfx.mec.mec_fw_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hdr->header.ucode_size_bytes,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.mec_fw_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.mec_fw_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_0_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

static int gfx_v9_0_kiq_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, MEC_HPD_SIZE);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

static int gfx_v9_0_kiq_init_ring(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	mutex_init(&kiq->ring_mutex);

	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
	if (adev->gfx.mec2_fw) {
		ring->me = 2;
		ring->pipe = 0;
	} else {
		ring->me = 1;
		ring->pipe = 1;
	}

	ring->queue = 0;
	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

static void gfx_v9_0_kiq_free_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq)
{
	amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}
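
/*
 * MQDs (memory queue descriptors) hold the full HQD register image of a
 * compute queue in memory so the queue can be (re)mapped by the KIQ or
 * restored after reset.  A kmalloc'd backup copy per queue lets the
 * driver restore a known-good MQD later; slot AMDGPU_MAX_COMPUTE_RINGS
 * in the backup array is reserved for the KIQ itself.
 */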
/* create MQD for each compute queue */
static int gfx_v9_0_compute_mqd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

static void gfx_v9_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
}
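
/*
 * Wave debug helpers: SQ_IND_INDEX/SQ_IND_DATA form an indirect register
 * pair exposing per-wave state.  The index encodes which SIMD, wave (and
 * optionally thread) to address; FORCE_READ returns a value even for
 * inactive waves and AUTO_INCR steps the index for bulk reads.
 */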
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};

static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}
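
/*
 * NGG (next-generation geometry) support: the geometry pipeline streams
 * primitive, position and control-sideband data through dedicated VRAM
 * buffers, sized per shader engine.  A zero module parameter selects the
 * default size; a negative one is rejected as invalid.
 */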
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}
out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}

static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data;
	u32 size;
	u32 base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = 0;
	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = 0;
	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static int gfx_v9_0_sw_init(void *handle)
{
	int i, r;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		unsigned irq_type;

		/* max 32 queues per MEC */
		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
			DRM_ERROR("Too many (%d) compute rings!\n", i);
			break;
		}
		ring = &adev->gfx.compute_ring[i];
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (AMDGPU_DOORBELL64_MEC_RING0 + i) << 1;
		ring->me = 1; /* first MEC */
		ring->pipe = i / 8;
		ring->queue = i % 8;
		ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, irq_type);
		if (r)
			return r;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = gfx_v9_0_kiq_init(adev);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq;
		r = gfx_v9_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = gfx_v9_0_compute_mqd_sw_init(adev);
		if (r)
			return r;
	}

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_compute_mqd_sw_fini(adev);
		gfx_v9_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
		gfx_v9_0_kiq_fini(adev);
	}

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);

	return 0;
}

static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (se_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (sh_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	} else {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	}
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v9_0_create_bitmask(u32 bit_width)
{
	return (u32)((1ULL << bit_width) - 1);
}

static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = gfx_v9_0_create_bitmask(adev->gfx.config.max_backends_per_se /
				       adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);

	gfx_v9_0_enable_gui_idle_interrupt(adev, false);

	gfx_v9_0_wait_for_rlc_serdes(adev);
}

static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);

	/* carrizo do enable cp interrupt after cp inited */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);

#ifdef AMDGPU_RLC_DEBUG_RETRY
	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
	if (rlc_ucode_ver == 0x108) {
		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
				rlc_ucode_ver, adev->gfx.rlc_fw_version);
		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
		 * default is 0x9C4 to create a 100us interval */
		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
		 * to disable the page fault retry interrupts, default is
		 * 0x100 (256) */
		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
	}
#endif
}

static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
			RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	gfx_v9_0_rlc_stop(adev);

	/* disable CG */
	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

	/* disable PG */
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

	gfx_v9_0_rlc_reset(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy rlc firmware loading */
		r = gfx_v9_0_rlc_load_microcode(adev);
		if (r)
			return r;
	}

	gfx_v9_0_rlc_start(adev);

	return 0;
}

static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}

static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	gfx_v9_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}
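
/*
 * Size, in dwords, of the clear-state buffer built in
 * gfx_v9_0_cp_gfx_start(): 2 for the clear-state preamble, 3 for context
 * control, 2 + reg_count per SET_CONTEXT_REG extent, plus the trailing
 * raster config, end-of-clear-state and clear-state packets.
 */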
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v9_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	return 0;
}

static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

	/* start the ring */
	gfx_v9_0_cp_gfx_start(adev);
	ring->ready = true;

	return 0;
}

static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
		adev->gfx.kiq.ring.ready = false;
	}
	udelay(50);
}

static int gfx_v9_0_cp_compute_start(struct amdgpu_device *adev)
{
	gfx_v9_0_cp_compute_enable(adev, true);

	return 0;
}

static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			 mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			adev->gfx.mec_fw_version);
	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

static void gfx_v9_0_cp_compute_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj) {
			r = amdgpu_bo_reserve(ring->mqd_obj, true);
			if (unlikely(r != 0))
				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);

			amdgpu_bo_unpin(ring->mqd_obj);
			amdgpu_bo_unreserve(ring->mqd_obj);

			amdgpu_bo_unref(&ring->mqd_obj);
			ring->mqd_obj = NULL;
		}
	}
}

static int gfx_v9_0_init_queue(struct amdgpu_ring *ring);

static int gfx_v9_0_cp_compute_resume(struct amdgpu_device *adev)
{
	int r, i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (gfx_v9_0_init_queue(ring))
			dev_warn(adev->dev, "compute queue %d init failed!\n", i);
	}

	r = gfx_v9_0_cp_compute_start(adev);
	if (r)
		return r;

	return 0;
}
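
/*
 * The KIQ (kernel interface queue) is a privileged compute queue the
 * driver uses to map and unmap the other compute queues.
 * RLC_CP_SCHEDULERS must be told which me/pipe/queue the KIQ lives on
 * before the queue is used.
 */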
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
1886 static int gfx_v9_0_kiq_enable(struct amdgpu_ring *ring)
1888 struct amdgpu_device *adev = ring->adev;
1889 uint32_t scratch, tmp = 0;
1892 r = amdgpu_gfx_scratch_get(adev, &scratch);
1894 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
1897 WREG32(scratch, 0xCAFEDEAD);
1899 r = amdgpu_ring_alloc(ring, 8);
1901 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
1902 amdgpu_gfx_scratch_free(adev, scratch);
1905 amdgpu_ring_alloc(ring, 11);
1907 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6));
1908 amdgpu_ring_write(ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
1909 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
1910 amdgpu_ring_write(ring, 0x000000FF); /* queue mask lo */
1911 amdgpu_ring_write(ring, 0); /* queue mask hi */
1912 amdgpu_ring_write(ring, 0); /* gws mask lo */
1913 amdgpu_ring_write(ring, 0); /* gws mask hi */
1914 amdgpu_ring_write(ring, 0); /* oac mask */
1915 amdgpu_ring_write(ring, 0); /* gds heap base:0, gds heap size:0 */
1916 /* write to scratch for completion */
1917 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1918 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1919 amdgpu_ring_write(ring, 0xDEADBEEF);
1920 amdgpu_ring_commit(ring);
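/* completion handshake: the CP echoes 0xDEADBEEF into the scratch
 * register once it has consumed the SET_RESOURCES packet, which the
 * loop below polls for
 */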
1922 for (i = 0; i < adev->usec_timeout; i++) {
1923 tmp = RREG32(scratch);
1924 if (tmp == 0xDEADBEEF)
1928 if (i >= adev->usec_timeout) {
1929 DRM_ERROR("KIQ enable failed (scratch(0x%04X)=0x%08X)\n",
1933 amdgpu_gfx_scratch_free(adev, scratch);
1938 static int gfx_v9_0_map_queue_enable(struct amdgpu_ring *kiq_ring,
1939 struct amdgpu_ring *ring)
1941 struct amdgpu_device *adev = kiq_ring->adev;
1942 uint64_t mqd_addr, wptr_addr;
1943 uint32_t scratch, tmp = 0;
1946 r = amdgpu_gfx_scratch_get(adev, &scratch);
1948 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
1951 WREG32(scratch, 0xCAFEDEAD);
1953 r = amdgpu_ring_alloc(kiq_ring, 10);
1955 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
1956 amdgpu_gfx_scratch_free(adev, scratch);
1960 mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
1961 wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1963 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
amdgpu_ring_write(kiq_ring,
1966 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
1967 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
1968 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
1969 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
1970 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
1971 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
1972 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
1973 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
1974 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
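/* note: the MAP_QUEUES ME field above is zero-based, so ring->me == 1
 * (MEC1) encodes as 0 and MEC2 as 1
 */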
1975 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
1976 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
1977 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
1978 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
1979 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
1980 /* write to scratch for completion */
1981 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1982 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1983 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
1984 amdgpu_ring_commit(kiq_ring);
1986 for (i = 0; i < adev->usec_timeout; i++) {
1987 tmp = RREG32(scratch);
1988 if (tmp == 0xDEADBEEF)
1992 if (i >= adev->usec_timeout) {
1993 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
1997 amdgpu_gfx_scratch_free(adev, scratch);
2002 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2004 struct amdgpu_device *adev = ring->adev;
2005 struct v9_mqd *mqd = ring->mqd_ptr;
2006 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2009 mqd->header = 0xC0310800;
2010 mqd->compute_pipelinestat_enable = 0x00000001;
2011 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2012 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2013 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2014 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2015 mqd->compute_misc_reserved = 0x00000003;
2017 eop_base_addr = ring->eop_gpu_addr >> 8;
2018 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2019 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2021 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2022 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2023 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2024 (order_base_2(MEC_HPD_SIZE / 4) - 1));
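/* worked example, assuming MEC_HPD_SIZE is 2048 bytes (512 dwords):
 * order_base_2(512) - 1 = 8, and the HW then interprets this as
 * 2^(8+1) = 512 dwords, matching the allocation
 */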
2026 mqd->cp_hqd_eop_control = tmp;
2028 /* enable doorbell? */
2029 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2031 if (ring->use_doorbell) {
2032 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2033 DOORBELL_OFFSET, ring->doorbell_index);
2034 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2036 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2037 DOORBELL_SOURCE, 0);
2038 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2042 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2045 mqd->cp_hqd_pq_doorbell_control = tmp;
2047 /* disable the queue if it's active */
2049 mqd->cp_hqd_dequeue_request = 0;
2050 mqd->cp_hqd_pq_rptr = 0;
2051 mqd->cp_hqd_pq_wptr_lo = 0;
2052 mqd->cp_hqd_pq_wptr_hi = 0;
2054 /* set the pointer to the MQD */
2055 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2056 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2058 /* set MQD vmid to 0 */
2059 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2060 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2061 mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2064 hqd_gpu_addr = ring->gpu_addr >> 8;
2065 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2066 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2068 /* set up the HQD, this is similar to CP_RB0_CNTL */
2069 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2070 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2071 (order_base_2(ring->ring_size / 4) - 1));
2072 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2073 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2075 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2077 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2078 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2079 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2080 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2081 mqd->cp_hqd_pq_control = tmp;
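/* QUEUE_SIZE appears to use the same 2^(n+1) dword encoding as
 * EOP_SIZE above; e.g. a (hypothetical) 64KB ring is 16384 dwords, so
 * order_base_2(16384) - 1 = 13 and the HW sees 2^(13+1) = 16384 dwords
 */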
2083 /* set the wb address whether it's enabled or not */
2084 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2085 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2086 mqd->cp_hqd_pq_rptr_report_addr_hi =
2087 upper_32_bits(wb_gpu_addr) & 0xffff;
2089 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2090 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2091 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2092 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2095 /* enable the doorbell if requested */
2096 if (ring->use_doorbell) {
2097 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2098 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2099 DOORBELL_OFFSET, ring->doorbell_index);
2101 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2103 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2104 DOORBELL_SOURCE, 0);
2105 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2109 mqd->cp_hqd_pq_doorbell_control = tmp;
2111 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2113 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2115 /* set the vmid for the queue */
2116 mqd->cp_hqd_vmid = 0;
2118 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2119 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2120 mqd->cp_hqd_persistent_state = tmp;
2122 /* set MIN_IB_AVAIL_SIZE */
2123 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2124 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2125 mqd->cp_hqd_ib_control = tmp;
2127 /* activate the queue */
2128 mqd->cp_hqd_active = 1;
2133 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2135 struct amdgpu_device *adev = ring->adev;
2136 struct v9_mqd *mqd = ring->mqd_ptr;
2139 /* disable wptr polling */
2140 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2142 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2143 mqd->cp_hqd_eop_base_addr_lo);
2144 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2145 mqd->cp_hqd_eop_base_addr_hi);
2147 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2148 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2149 mqd->cp_hqd_eop_control);
2151 /* enable doorbell? */
2152 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2153 mqd->cp_hqd_pq_doorbell_control);
2155 /* disable the queue if it's active */
2156 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2157 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2158 for (j = 0; j < adev->usec_timeout; j++) {
2159 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2163 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2164 mqd->cp_hqd_dequeue_request);
2165 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2166 mqd->cp_hqd_pq_rptr);
2167 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2168 mqd->cp_hqd_pq_wptr_lo);
2169 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2170 mqd->cp_hqd_pq_wptr_hi);
2173 /* set the pointer to the MQD */
2174 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2175 mqd->cp_mqd_base_addr_lo);
2176 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2177 mqd->cp_mqd_base_addr_hi);
2179 /* set MQD vmid to 0 */
2180 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2181 mqd->cp_mqd_control);
/* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2184 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2185 mqd->cp_hqd_pq_base_lo);
2186 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2187 mqd->cp_hqd_pq_base_hi);
2189 /* set up the HQD, this is similar to CP_RB0_CNTL */
2190 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2191 mqd->cp_hqd_pq_control);
2193 /* set the wb address whether it's enabled or not */
2194 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2195 mqd->cp_hqd_pq_rptr_report_addr_lo);
2196 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2197 mqd->cp_hqd_pq_rptr_report_addr_hi);
2199 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2200 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2201 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2202 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2203 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2205 /* enable the doorbell if requested */
2206 if (ring->use_doorbell) {
2207 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(AMDGPU_DOORBELL64_KIQ * 2) << 2);
2209 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2210 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2213 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2214 mqd->cp_hqd_pq_doorbell_control);
2216 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2217 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2218 mqd->cp_hqd_pq_wptr_lo);
2219 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2220 mqd->cp_hqd_pq_wptr_hi);
2222 /* set the vmid for the queue */
2223 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2225 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2226 mqd->cp_hqd_persistent_state);
2228 /* activate the queue */
2229 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2230 mqd->cp_hqd_active);
2232 if (ring->use_doorbell)
2233 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2238 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2240 struct amdgpu_device *adev = ring->adev;
2241 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
2242 struct v9_mqd *mqd = ring->mqd_ptr;
2243 bool is_kiq = (ring->funcs->type == AMDGPU_RING_TYPE_KIQ);
2244 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2248 gfx_v9_0_kiq_setting(&kiq->ring);
2250 mqd_idx = ring - &adev->gfx.compute_ring[0];
2253 if (!adev->gfx.in_reset) {
2254 memset((void *)mqd, 0, sizeof(*mqd));
2255 mutex_lock(&adev->srbm_mutex);
2256 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2257 gfx_v9_0_mqd_init(ring);
2259 gfx_v9_0_kiq_init_register(ring);
2260 soc15_grbm_select(adev, 0, 0, 0, 0);
2261 mutex_unlock(&adev->srbm_mutex);
2263 if (adev->gfx.mec.mqd_backup[mqd_idx])
2264 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2265 } else { /* for GPU_RESET case */
2266 /* reset MQD to a clean status */
2267 if (adev->gfx.mec.mqd_backup[mqd_idx])
2268 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2270 /* reset ring buffer */
2272 amdgpu_ring_clear_ring(ring);
2275 mutex_lock(&adev->srbm_mutex);
2276 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2277 gfx_v9_0_kiq_init_register(ring);
2278 soc15_grbm_select(adev, 0, 0, 0, 0);
2279 mutex_unlock(&adev->srbm_mutex);
2284 r = gfx_v9_0_kiq_enable(ring);
2286 r = gfx_v9_0_map_queue_enable(&kiq->ring, ring);
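/* the KIQ itself only needs SET_RESOURCES (gfx_v9_0_kiq_enable);
 * regular compute rings are instead mapped onto the HW by asking the
 * KIQ to execute a MAP_QUEUES packet on their behalf
 */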
2291 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2293 struct amdgpu_ring *ring = NULL;
2296 gfx_v9_0_cp_compute_enable(adev, true);
2298 ring = &adev->gfx.kiq.ring;
2300 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2301 if (unlikely(r != 0))
2304 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2306 r = gfx_v9_0_kiq_init_queue(ring);
2307 amdgpu_bo_kunmap(ring->mqd_obj);
2308 ring->mqd_ptr = NULL;
2310 amdgpu_bo_unreserve(ring->mqd_obj);
2314 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2315 ring = &adev->gfx.compute_ring[i];
2317 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2318 if (unlikely(r != 0))
2320 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2322 r = gfx_v9_0_kiq_init_queue(ring);
2323 amdgpu_bo_kunmap(ring->mqd_obj);
2324 ring->mqd_ptr = NULL;
2326 amdgpu_bo_unreserve(ring->mqd_obj);
2335 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2338 struct amdgpu_ring *ring;
2340 if (!(adev->flags & AMD_IS_APU))
2341 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2343 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2344 /* legacy firmware loading */
2345 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2349 r = gfx_v9_0_cp_compute_load_microcode(adev);
2354 r = gfx_v9_0_cp_gfx_resume(adev);
2358 if (amdgpu_sriov_vf(adev))
2359 r = gfx_v9_0_kiq_resume(adev);
2361 r = gfx_v9_0_cp_compute_resume(adev);
2365 ring = &adev->gfx.gfx_ring[0];
2366 r = amdgpu_ring_test_ring(ring);
2368 ring->ready = false;
2371 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2372 ring = &adev->gfx.compute_ring[i];
2375 r = amdgpu_ring_test_ring(ring);
2377 ring->ready = false;
2380 if (amdgpu_sriov_vf(adev)) {
2381 ring = &adev->gfx.kiq.ring;
2383 r = amdgpu_ring_test_ring(ring);
2385 ring->ready = false;
2388 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2393 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2395 gfx_v9_0_cp_gfx_enable(adev, enable);
2396 gfx_v9_0_cp_compute_enable(adev, enable);
2399 static int gfx_v9_0_hw_init(void *handle)
2402 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2404 gfx_v9_0_init_golden_registers(adev);
2406 gfx_v9_0_gpu_init(adev);
2408 r = gfx_v9_0_rlc_resume(adev);
2412 r = gfx_v9_0_cp_resume(adev);
2416 r = gfx_v9_0_ngg_en(adev);
2423 static int gfx_v9_0_hw_fini(void *handle)
2425 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2427 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2428 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2429 if (amdgpu_sriov_vf(adev)) {
2430 pr_debug("For SRIOV client, shouldn't do anything.\n");
2433 gfx_v9_0_cp_enable(adev, false);
2434 gfx_v9_0_rlc_stop(adev);
2435 gfx_v9_0_cp_compute_fini(adev);
2440 static int gfx_v9_0_suspend(void *handle)
2442 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2444 return gfx_v9_0_hw_fini(adev);
2447 static int gfx_v9_0_resume(void *handle)
2449 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2451 return gfx_v9_0_hw_init(adev);
2454 static bool gfx_v9_0_is_idle(void *handle)
2456 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2458 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
2459 GRBM_STATUS, GUI_ACTIVE))
2465 static int gfx_v9_0_wait_for_idle(void *handle)
2469 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2471 for (i = 0; i < adev->usec_timeout; i++) {
/* read GRBM_STATUS */
2473 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
2474 GRBM_STATUS__GUI_ACTIVE_MASK;
2476 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
2483 static int gfx_v9_0_soft_reset(void *handle)
2485 u32 grbm_soft_reset = 0;
2487 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2490 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
2491 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2492 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2493 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2494 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2495 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2496 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2497 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2498 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2499 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2500 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2503 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2504 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2505 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2509 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
2510 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2511 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2512 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2515 if (grbm_soft_reset) {
2517 gfx_v9_0_rlc_stop(adev);
2519 /* Disable GFX parsing/prefetching */
2520 gfx_v9_0_cp_gfx_enable(adev, false);
2522 /* Disable MEC parsing/prefetching */
2523 gfx_v9_0_cp_compute_enable(adev, false);
2525 if (grbm_soft_reset) {
2526 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2527 tmp |= grbm_soft_reset;
2528 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2529 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2530 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2534 tmp &= ~grbm_soft_reset;
2535 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2536 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2539 /* Wait a little for things to settle down */
2545 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2549 mutex_lock(&adev->gfx.gpu_clock_mutex);
2550 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
2551 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
2552 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2553 mutex_unlock(&adev->gfx.gpu_clock_mutex);
2557 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
2559 uint32_t gds_base, uint32_t gds_size,
2560 uint32_t gws_base, uint32_t gws_size,
2561 uint32_t oa_base, uint32_t oa_size)
2563 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
2564 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
2566 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
2567 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
2569 oa_base = oa_base >> AMDGPU_OA_SHIFT;
2570 oa_size = oa_size >> AMDGPU_OA_SHIFT;
2573 gfx_v9_0_write_data_to_reg(ring, 0, false,
2574 amdgpu_gds_reg_offset[vmid].mem_base,
2578 gfx_v9_0_write_data_to_reg(ring, 0, false,
2579 amdgpu_gds_reg_offset[vmid].mem_size,
2583 gfx_v9_0_write_data_to_reg(ring, 0, false,
2584 amdgpu_gds_reg_offset[vmid].gws,
2585 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2588 gfx_v9_0_write_data_to_reg(ring, 0, false,
2589 amdgpu_gds_reg_offset[vmid].oa,
2590 (1 << (oa_size + oa_base)) - (1 << oa_base));
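/* the OA expression builds a contiguous mask of oa_size bits starting
 * at oa_base, e.g. base 2 / size 3 gives (1 << 5) - (1 << 2) = 0b11100
 */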
2593 static int gfx_v9_0_early_init(void *handle)
2595 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2597 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
2598 adev->gfx.num_compute_rings = GFX9_NUM_COMPUTE_RINGS;
2599 gfx_v9_0_set_ring_funcs(adev);
2600 gfx_v9_0_set_irq_funcs(adev);
2601 gfx_v9_0_set_gds_init(adev);
2602 gfx_v9_0_set_rlc_funcs(adev);
2607 static int gfx_v9_0_late_init(void *handle)
2609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2612 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2616 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2623 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
2625 uint32_t rlc_setting, data;
2628 if (adev->gfx.rlc.in_safe_mode)
2631 /* if RLC is not enabled, do nothing */
2632 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2633 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
2636 if (adev->cg_flags &
2637 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
2638 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
2639 data = RLC_SAFE_MODE__CMD_MASK;
2640 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
2641 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
2643 /* wait for RLC_SAFE_MODE */
2644 for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
2649 adev->gfx.rlc.in_safe_mode = true;
2653 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
2655 uint32_t rlc_setting, data;
2657 if (!adev->gfx.rlc.in_safe_mode)
2660 /* if RLC is not enabled, do nothing */
2661 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2662 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
2665 if (adev->cg_flags &
2666 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
/* Try to exit safe mode only if it is already in safe mode. */
2671 data = RLC_SAFE_MODE__CMD_MASK;
2672 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
2673 adev->gfx.rlc.in_safe_mode = false;
2677 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2682 /* It is disabled by HW by default */
2683 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2684 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
2685 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
2686 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
2687 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2688 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2689 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2691 /* only for Vega10 & Raven1 */
2692 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
2695 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
2697 /* MGLS is a global flag to control all MGLS in GFX */
2698 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2699 /* 2 - RLC memory Light sleep */
2700 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2701 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
2702 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2704 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
2706 /* 3 - CP memory Light sleep */
2707 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2708 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
2709 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2711 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
2715 /* 1 - MGCG_OVERRIDE */
2716 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
2717 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
2718 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2719 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2720 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2721 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2723 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
2725 /* 2 - disable MGLS in RLC */
2726 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
2727 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2728 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2729 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
2732 /* 3 - disable MGLS in CP */
2733 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
2734 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2735 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2736 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
2741 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
2746 adev->gfx.rlc.funcs->enter_safe_mode(adev);
2748 /* Enable 3D CGCG/CGLS */
2749 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
2750 /* write cmd to clear cgcg/cgls ov */
2751 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
2752 /* unset CGCG override */
2753 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
2754 /* update CGCG and CGLS override bits */
2756 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable 3D CGCG FSM (0x0020003f) */
2758 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
2759 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2760 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
2761 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
2762 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2763 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
2765 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
2767 /* set IDLE_POLL_COUNT(0x00900100) */
2768 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
2769 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2770 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2772 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
2774 /* Disable CGCG/CGLS */
2775 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
2776 /* disable cgcg, cgls should be disabled */
2777 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
2778 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
2779 /* disable cgcg and cgls in FSM */
2781 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
2784 adev->gfx.rlc.funcs->exit_safe_mode(adev);
2787 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2792 adev->gfx.rlc.funcs->enter_safe_mode(adev);
2794 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2795 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
2796 /* unset CGCG override */
2797 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2798 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2799 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2801 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2802 /* update CGCG and CGLS override bits */
2804 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable CGCG FSM (0x0020003F) */
2807 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
2808 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2809 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2810 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2811 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2812 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2814 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
2816 /* set IDLE_POLL_COUNT(0x00900100) */
2817 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
2818 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2819 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2821 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
2823 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
2824 /* reset CGCG/CGLS bits */
2825 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2826 /* disable cgcg and cgls in FSM */
2828 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
2831 adev->gfx.rlc.funcs->exit_safe_mode(adev);
2834 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* CGCG/CGLS should be enabled after MGCG/MGLS
 * === MGCG + MGLS ===
 */
2841 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
/* === CGCG/CGLS for GFX 3D Only === */
2843 gfx_v9_0_update_3d_clock_gating(adev, enable);
2844 /* === CGCG + CGLS === */
2845 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
/* CGCG/CGLS should be disabled before MGCG/MGLS
 * === CGCG + CGLS ===
 */
2850 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
/* === CGCG/CGLS for GFX 3D Only === */
2852 gfx_v9_0_update_3d_clock_gating(adev, enable);
2853 /* === MGCG + MGLS === */
2854 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
2859 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
2860 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
2861 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
2864 static int gfx_v9_0_set_powergating_state(void *handle,
2865 enum amd_powergating_state state)
2870 static int gfx_v9_0_set_clockgating_state(void *handle,
2871 enum amd_clockgating_state state)
2873 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2875 if (amdgpu_sriov_vf(adev))
2878 switch (adev->asic_type) {
2881 gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
2890 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
2892 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2895 if (amdgpu_sriov_vf(adev))
2898 /* AMD_CG_SUPPORT_GFX_MGCG */
2899 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
2900 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2901 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
2903 /* AMD_CG_SUPPORT_GFX_CGCG */
2904 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
2905 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2906 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
2908 /* AMD_CG_SUPPORT_GFX_CGLS */
2909 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2910 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
2912 /* AMD_CG_SUPPORT_GFX_RLC_LS */
2913 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
2914 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2915 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2917 /* AMD_CG_SUPPORT_GFX_CP_LS */
2918 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
2919 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2920 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2922 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
2923 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
2924 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
2925 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
2927 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
2928 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
2929 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
2932 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 uses a 32-bit rptr */
2937 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2939 struct amdgpu_device *adev = ring->adev;
2942 /* XXX check if swapping is necessary on BE */
2943 if (ring->use_doorbell) {
2944 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
2946 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
2947 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
2953 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2955 struct amdgpu_device *adev = ring->adev;
2957 if (ring->use_doorbell) {
2958 /* XXX check if swapping is necessary on BE */
2959 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2960 WDOORBELL64(ring->doorbell_index, ring->wptr);
2962 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2963 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2967 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2969 u32 ref_and_mask, reg_mem_engine;
2970 struct nbio_hdp_flush_reg *nbio_hf_reg;
2972 if (ring->adev->asic_type == CHIP_VEGA10)
2973 nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
2975 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2978 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2981 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2988 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2989 reg_mem_engine = 1; /* pfp */
2992 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2993 nbio_hf_reg->hdp_flush_req_offset,
2994 nbio_hf_reg->hdp_flush_done_offset,
2995 ref_and_mask, ref_and_mask, 0x20);
2998 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3000 gfx_v9_0_write_data_to_reg(ring, 0, true,
3001 SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
3004 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3005 struct amdgpu_ib *ib,
3006 unsigned vm_id, bool ctx_switch)
3008 u32 header, control = 0;
3010 if (ib->flags & AMDGPU_IB_FLAG_CE)
3011 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3013 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3015 control |= ib->length_dw | (vm_id << 24);
3017 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3018 control |= INDIRECT_BUFFER_PRE_ENB(1);
3020 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3021 gfx_v9_0_ring_emit_de_meta(ring);
3024 amdgpu_ring_write(ring, header);
3025 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3026 amdgpu_ring_write(ring,
3030 lower_32_bits(ib->gpu_addr));
3031 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3032 amdgpu_ring_write(ring, control);
3035 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3036 struct amdgpu_ib *ib,
3037 unsigned vm_id, bool ctx_switch)
3039 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3041 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3042 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3043 amdgpu_ring_write(ring,
3047 lower_32_bits(ib->gpu_addr));
3048 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3049 amdgpu_ring_write(ring, control);
3052 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3053 u64 seq, unsigned flags)
3055 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3056 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3058 /* RELEASE_MEM - flush caches, send int */
3059 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3060 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3062 EOP_TC_WB_ACTION_EN |
3063 EOP_TC_MD_ACTION_EN |
3064 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3066 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
/* the address should be Qword-aligned for a 64-bit write, or
 * Dword-aligned if only the low 32 bits of data are sent
 * (data high is discarded)
 */
3076 amdgpu_ring_write(ring, lower_32_bits(addr));
3077 amdgpu_ring_write(ring, upper_32_bits(addr));
3078 amdgpu_ring_write(ring, lower_32_bits(seq));
3079 amdgpu_ring_write(ring, upper_32_bits(seq));
3080 amdgpu_ring_write(ring, 0);
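/* from the selects above: DATA_SEL 2 writes the full 64-bit seq while
 * 1 writes only the low dword; INT_SEL 2 additionally raises the EOP
 * interrupt once the write has landed
 */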
3083 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3085 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3086 uint32_t seq = ring->fence_drv.sync_seq;
3087 uint64_t addr = ring->fence_drv.gpu_addr;
3089 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3090 lower_32_bits(addr), upper_32_bits(addr),
3091 seq, 0xffffffff, 4);
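/* poll the fence writeback address until the sequence number stored
 * there reaches sync_seq, i.e. all previously emitted fences on this
 * ring have signalled
 */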
3094 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3095 unsigned vm_id, uint64_t pd_addr)
3097 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
3098 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3099 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3100 unsigned eng = ring->vm_inv_eng;
3102 pd_addr = pd_addr | 0x1; /* valid bit */
/* for now, use only the physical base address of the PDE plus the valid bit */
3104 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
3106 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3107 hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
3108 lower_32_bits(pd_addr));
3110 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3111 hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
3112 upper_32_bits(pd_addr));
3114 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3115 hub->vm_inv_eng0_req + eng, req);
3117 /* wait for the invalidate to complete */
3118 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
3119 eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
3121 /* compute doesn't have PFP */
3123 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3124 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3125 amdgpu_ring_write(ring, 0x0);
3129 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware uses a 32-bit rptr */
3134 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
3138 /* XXX check if swapping is necessary on BE */
3139 if (ring->use_doorbell)
3140 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
3146 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
3148 struct amdgpu_device *adev = ring->adev;
3150 /* XXX check if swapping is necessary on BE */
3151 if (ring->use_doorbell) {
3152 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3153 WDOORBELL64(ring->doorbell_index, ring->wptr);
BUG(); /* only the DOORBELL method is supported on gfx9 for now */
3159 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3160 u64 seq, unsigned int flags)
/* we only allocate 32 bits for each seq writeback address */
3163 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3165 /* write fence seq to the "addr" */
3166 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3167 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3168 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3169 amdgpu_ring_write(ring, lower_32_bits(addr));
3170 amdgpu_ring_write(ring, upper_32_bits(addr));
3171 amdgpu_ring_write(ring, lower_32_bits(seq));
3173 if (flags & AMDGPU_FENCE_FLAG_INT) {
3174 /* set register to trigger INT */
3175 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3176 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3177 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3178 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
3179 amdgpu_ring_write(ring, 0);
3180 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3184 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
3186 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3187 amdgpu_ring_write(ring, 0);
3190 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
3192 static struct v9_ce_ib_state ce_payload = {0};
3196 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
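/* the WRITE_DATA body is one control dword, two address dwords and the
 * payload; the PACKET3 count field is that length minus one, i.e.
 * payload dwords + 2, written here as "+ 4 - 2"
 */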
3197 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
3199 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3200 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3201 WRITE_DATA_DST_SEL(8) |
3203 WRITE_DATA_CACHE_POLICY(0));
3204 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3205 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3206 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
3209 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
3211 static struct v9_de_ib_state de_payload = {0};
3212 uint64_t csa_addr, gds_addr;
3215 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
3216 gds_addr = csa_addr + 4096;
3217 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
3218 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
3220 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
3221 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3222 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
3223 WRITE_DATA_DST_SEL(8) |
3225 WRITE_DATA_CACHE_POLICY(0));
3226 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
3227 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
3228 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
3231 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
3235 if (amdgpu_sriov_vf(ring->adev))
3236 gfx_v9_0_ring_emit_ce_meta(ring);
dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
3239 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
3240 /* set load_global_config & load_global_uconfig */
3242 /* set load_cs_sh_regs */
3244 /* set load_per_context_state & load_gfx_sh_regs for GFX */
/* set load_ce_ram if a preamble is presented */
3248 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
/* still load_ce_ram if this is the first time the preamble is
 * presented, even though no context switch happens.
 */
3254 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
3258 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3259 amdgpu_ring_write(ring, dw2);
3260 amdgpu_ring_write(ring, 0);
3263 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
3266 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
3267 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
3268 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
3269 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
3270 ret = ring->wptr & ring->buf_mask;
3271 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
3275 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
3278 BUG_ON(offset > ring->buf_mask);
3279 BUG_ON(ring->ring[offset] != 0x55aa55aa);
3281 cur = (ring->wptr & ring->buf_mask) - 1;
3282 if (likely(cur > offset))
3283 ring->ring[offset] = cur - offset;
3285 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
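/* when cur <= offset the write pointer has wrapped around the ring,
 * so the skip count runs to the end of the buffer and back to cur
 */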
3288 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
3290 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
3291 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
3294 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
3296 struct amdgpu_device *adev = ring->adev;
3298 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3299 amdgpu_ring_write(ring, 0 | /* src: register*/
3300 (5 << 8) | /* dst: memory */
3301 (1 << 20)); /* write confirm */
3302 amdgpu_ring_write(ring, reg);
3303 amdgpu_ring_write(ring, 0);
3304 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3305 adev->virt.reg_val_offs * 4));
3306 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3307 adev->virt.reg_val_offs * 4));
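/* the COPY_DATA above lands the register value in the reg_val_offs
 * writeback slot, where the SR-IOV virtualization code picks it up
 * to complete the emulated register read
 */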
3310 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3313 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3314 amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
3315 amdgpu_ring_write(ring, reg);
3316 amdgpu_ring_write(ring, 0);
3317 amdgpu_ring_write(ring, val);
3320 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
3321 enum amdgpu_interrupt_state state)
3324 case AMDGPU_IRQ_STATE_DISABLE:
3325 case AMDGPU_IRQ_STATE_ENABLE:
3326 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3327 TIME_STAMP_INT_ENABLE,
3328 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3335 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3337 enum amdgpu_interrupt_state state)
3339 u32 mec_int_cntl, mec_int_cntl_reg;
/* amdgpu controls only pipe 0 of MEC1. That's why this function only
 * handles the setting of interrupts for this specific pipe. All other
 * pipes' interrupts are set by amdkfd.
 */
3350 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
3353 DRM_DEBUG("invalid pipe %d\n", pipe);
3357 DRM_DEBUG("invalid me %d\n", me);
3362 case AMDGPU_IRQ_STATE_DISABLE:
3363 mec_int_cntl = RREG32(mec_int_cntl_reg);
3364 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3365 TIME_STAMP_INT_ENABLE, 0);
3366 WREG32(mec_int_cntl_reg, mec_int_cntl);
3368 case AMDGPU_IRQ_STATE_ENABLE:
3369 mec_int_cntl = RREG32(mec_int_cntl_reg);
3370 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3371 TIME_STAMP_INT_ENABLE, 1);
3372 WREG32(mec_int_cntl_reg, mec_int_cntl);
3379 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
3380 struct amdgpu_irq_src *source,
3382 enum amdgpu_interrupt_state state)
3385 case AMDGPU_IRQ_STATE_DISABLE:
3386 case AMDGPU_IRQ_STATE_ENABLE:
3387 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3388 PRIV_REG_INT_ENABLE,
3389 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3398 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
3399 struct amdgpu_irq_src *source,
3401 enum amdgpu_interrupt_state state)
3404 case AMDGPU_IRQ_STATE_DISABLE:
3405 case AMDGPU_IRQ_STATE_ENABLE:
3406 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3407 PRIV_INSTR_INT_ENABLE,
3408 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3416 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
3417 struct amdgpu_irq_src *src,
3419 enum amdgpu_interrupt_state state)
3422 case AMDGPU_CP_IRQ_GFX_EOP:
3423 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
3425 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3426 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
3428 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3429 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
3431 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3432 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
3434 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3435 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
3437 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3438 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
3440 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3441 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
3443 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3444 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
3446 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3447 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
3455 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
3456 struct amdgpu_irq_src *source,
3457 struct amdgpu_iv_entry *entry)
3460 u8 me_id, pipe_id, queue_id;
3461 struct amdgpu_ring *ring;
3463 DRM_DEBUG("IH: CP EOP\n");
3464 me_id = (entry->ring_id & 0x0c) >> 2;
3465 pipe_id = (entry->ring_id & 0x03) >> 0;
3466 queue_id = (entry->ring_id & 0x70) >> 4;
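/* ring_id layout per the masks above: pipe in bits [1:0], ME in
 * bits [3:2], queue in bits [6:4]
 */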
3470 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3474 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3475 ring = &adev->gfx.compute_ring[i];
3476 /* Per-queue interrupt is supported for MEC starting from VI.
* The interrupt can only be enabled/disabled per pipe instead of per queue.
*/
3479 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3480 amdgpu_fence_process(ring);
3487 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
3488 struct amdgpu_irq_src *source,
3489 struct amdgpu_iv_entry *entry)
3491 DRM_ERROR("Illegal register access in command stream\n");
3492 schedule_work(&adev->reset_work);
3496 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
3497 struct amdgpu_irq_src *source,
3498 struct amdgpu_iv_entry *entry)
3500 DRM_ERROR("Illegal instruction in command stream\n");
3501 schedule_work(&adev->reset_work);
3505 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
3506 struct amdgpu_irq_src *src,
3508 enum amdgpu_interrupt_state state)
3510 uint32_t tmp, target;
3511 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
3514 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
3516 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
3517 target += ring->pipe;
3520 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
3521 if (state == AMDGPU_IRQ_STATE_DISABLE) {
3522 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
3523 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
3524 GENERIC2_INT_ENABLE, 0);
3525 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
3527 tmp = RREG32(target);
3528 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
3529 GENERIC2_INT_ENABLE, 0);
3530 WREG32(target, tmp);
3532 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
3533 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
3534 GENERIC2_INT_ENABLE, 1);
3535 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
3537 tmp = RREG32(target);
3538 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
3539 GENERIC2_INT_ENABLE, 1);
3540 WREG32(target, tmp);
BUG(); /* the KIQ only supports GENERIC2_INT for now */
3550 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
3551 struct amdgpu_irq_src *source,
3552 struct amdgpu_iv_entry *entry)
3554 u8 me_id, pipe_id, queue_id;
3555 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
3557 me_id = (entry->ring_id & 0x0c) >> 2;
3558 pipe_id = (entry->ring_id & 0x03) >> 0;
3559 queue_id = (entry->ring_id & 0x70) >> 4;
3560 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
3561 me_id, pipe_id, queue_id);
3563 amdgpu_fence_process(ring);
3567 const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
3569 .early_init = gfx_v9_0_early_init,
3570 .late_init = gfx_v9_0_late_init,
3571 .sw_init = gfx_v9_0_sw_init,
3572 .sw_fini = gfx_v9_0_sw_fini,
3573 .hw_init = gfx_v9_0_hw_init,
3574 .hw_fini = gfx_v9_0_hw_fini,
3575 .suspend = gfx_v9_0_suspend,
3576 .resume = gfx_v9_0_resume,
3577 .is_idle = gfx_v9_0_is_idle,
3578 .wait_for_idle = gfx_v9_0_wait_for_idle,
3579 .soft_reset = gfx_v9_0_soft_reset,
3580 .set_clockgating_state = gfx_v9_0_set_clockgating_state,
3581 .set_powergating_state = gfx_v9_0_set_powergating_state,
3582 .get_clockgating_state = gfx_v9_0_get_clockgating_state,
3585 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
3586 .type = AMDGPU_RING_TYPE_GFX,
3588 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
3589 .support_64bit_ptrs = true,
3590 .vmhub = AMDGPU_GFXHUB,
3591 .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
3592 .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
3593 .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
.emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
3596 7 + /* PIPELINE_SYNC */
3598 8 + /* FENCE for VM_FLUSH */
3599 20 + /* GDS switch */
4 + /* double SWITCH_BUFFER:
the first COND_EXEC jumps to the spot just
prior to this double SWITCH_BUFFER */
3610 8 + 8 + /* FENCE x2 */
3611 2, /* SWITCH_BUFFER */
3612 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
3613 .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
3614 .emit_fence = gfx_v9_0_ring_emit_fence,
3615 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
3616 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
3617 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
3618 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
3619 .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
3620 .test_ring = gfx_v9_0_ring_test_ring,
3621 .test_ib = gfx_v9_0_ring_test_ib,
3622 .insert_nop = amdgpu_ring_insert_nop,
3623 .pad_ib = amdgpu_ring_generic_pad_ib,
3624 .emit_switch_buffer = gfx_v9_ring_emit_sb,
3625 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
3626 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
3627 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
3628 .emit_tmz = gfx_v9_0_ring_emit_tmz,
3631 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
3632 .type = AMDGPU_RING_TYPE_COMPUTE,
3634 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
3635 .support_64bit_ptrs = true,
3636 .vmhub = AMDGPU_GFXHUB,
3637 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
3638 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
3639 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
3641 20 + /* gfx_v9_0_ring_emit_gds_switch */
3642 7 + /* gfx_v9_0_ring_emit_hdp_flush */
3643 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
3644 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
3645 24 + /* gfx_v9_0_ring_emit_vm_flush */
3646 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
3647 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
3648 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
3649 .emit_fence = gfx_v9_0_ring_emit_fence,
3650 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
3651 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
3652 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
3653 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
3654 .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
3655 .test_ring = gfx_v9_0_ring_test_ring,
3656 .test_ib = gfx_v9_0_ring_test_ib,
3657 .insert_nop = amdgpu_ring_insert_nop,
3658 .pad_ib = amdgpu_ring_generic_pad_ib,
3661 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
3662 .type = AMDGPU_RING_TYPE_KIQ,
3664 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
3665 .support_64bit_ptrs = true,
3666 .vmhub = AMDGPU_GFXHUB,
3667 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
3668 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
3669 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
3671 20 + /* gfx_v9_0_ring_emit_gds_switch */
3672 7 + /* gfx_v9_0_ring_emit_hdp_flush */
3673 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
3674 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
3675 24 + /* gfx_v9_0_ring_emit_vm_flush */
3676 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
3677 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
3678 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
3679 .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
3680 .test_ring = gfx_v9_0_ring_test_ring,
3681 .test_ib = gfx_v9_0_ring_test_ib,
3682 .insert_nop = amdgpu_ring_insert_nop,
3683 .pad_ib = amdgpu_ring_generic_pad_ib,
3684 .emit_rreg = gfx_v9_0_ring_emit_rreg,
3685 .emit_wreg = gfx_v9_0_ring_emit_wreg,
3688 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
3692 adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
3694 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3695 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
3697 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3698 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
3701 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
3702 .set = gfx_v9_0_kiq_set_interrupt_state,
3703 .process = gfx_v9_0_kiq_irq,
3706 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
3707 .set = gfx_v9_0_set_eop_interrupt_state,
3708 .process = gfx_v9_0_eop_irq,
3711 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
3712 .set = gfx_v9_0_set_priv_reg_fault_state,
3713 .process = gfx_v9_0_priv_reg_irq,
3716 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
3717 .set = gfx_v9_0_set_priv_inst_fault_state,
3718 .process = gfx_v9_0_priv_inst_irq,
3721 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
3723 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3724 adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
3726 adev->gfx.priv_reg_irq.num_types = 1;
3727 adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
3729 adev->gfx.priv_inst_irq.num_types = 1;
3730 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
3732 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
3733 adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
3736 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
3738 switch (adev->asic_type) {
3741 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
3748 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
/* init ASIC GDS info */
3751 adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
3752 adev->gds.gws.total_size = 64;
3753 adev->gds.oa.total_size = 16;
3755 if (adev->gds.mem.total_size == 64 * 1024) {
3756 adev->gds.mem.gfx_partition_size = 4096;
3757 adev->gds.mem.cs_partition_size = 4096;
3759 adev->gds.gws.gfx_partition_size = 4;
3760 adev->gds.gws.cs_partition_size = 4;
3762 adev->gds.oa.gfx_partition_size = 4;
3763 adev->gds.oa.cs_partition_size = 1;
3765 adev->gds.mem.gfx_partition_size = 1024;
3766 adev->gds.mem.cs_partition_size = 1024;
3768 adev->gds.gws.gfx_partition_size = 16;
3769 adev->gds.gws.cs_partition_size = 16;
3771 adev->gds.oa.gfx_partition_size = 4;
3772 adev->gds.oa.cs_partition_size = 4;
3776 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3780 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
3781 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
3783 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3784 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3786 mask = gfx_v9_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
3788 return (~data) & mask;
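/* CC/GC_USER INACTIVE_CUS flag the fused-off or disabled CUs, so the
 * active-CU bitmap is the inverse, clamped to max_cu_per_sh
 */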
3791 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
3792 struct amdgpu_cu_info *cu_info)
3794 int i, j, k, counter, active_cu_number = 0;
3795 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
3797 if (!adev || !cu_info)
3800 memset(cu_info, 0, sizeof(*cu_info));
3802 mutex_lock(&adev->grbm_idx_mutex);
3803 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3804 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3808 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
3809 bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
3810 cu_info->bitmap[i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3813 if (bitmap & mask) {
3814 if (counter < adev->gfx.config.max_cu_per_sh)
3820 active_cu_number += counter;
3821 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
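/* pack this SH's always-on mask into the global word: 8 bits per
 * shader array, 16 bits per shader engine (per the shift above)
 */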
3824 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3825 mutex_unlock(&adev->grbm_idx_mutex);
3827 cu_info->number = active_cu_number;
3828 cu_info->ao_cu_mask = ao_cu_mask;
3833 static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
3837 bool use_doorbell = true;
struct amdgpu_device *adev = ring->adev;
3847 if (ring->mqd_obj == NULL) {
3848 r = amdgpu_bo_create(adev,
3849 sizeof(struct v9_mqd),
3851 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3852 NULL, &ring->mqd_obj);
3854 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3859 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3860 if (unlikely(r != 0)) {
3861 gfx_v9_0_cp_compute_fini(adev);
3865 r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
3868 dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
3869 gfx_v9_0_cp_compute_fini(adev);
3872 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
3874 dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
3875 gfx_v9_0_cp_compute_fini(adev);
	/* init the mqd struct */
	memset(buf, 0, sizeof(struct v9_mqd));

	mqd = (struct v9_mqd *)buf;
	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;
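	/*
	 * 0xffffffff in the static thread management fields above leaves
	 * every CU in every shader engine enabled for this queue's waves.
	 */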
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me,
			  ring->pipe,
			  ring->queue, 0);
	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	BUG_ON(ring->me != 1 || ring->pipe != 0); /* address math below only handles MEC1 pipe 0 EOP */
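	/*
	 * Each queue owns a MEC_HPD_SIZE slice of the EOP buffer; the
	 * CP_HQD_EOP_BASE_ADDR registers take a 256-byte aligned address,
	 * hence the >> 8 below.
	 */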
	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring->queue * MEC_HPD_SIZE);
	eop_gpu_addr >>= 8;

	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, lower_32_bits(eop_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_gpu_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_gpu_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MEC_HPD_SIZE / 4) - 1));
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, tmp);

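	/*
	 * Worked EOP_SIZE example, assuming MEC_HPD_SIZE is 2048 bytes:
	 * 2048 / 4 = 512 dwords, order_base_2(512) - 1 = 8, and the CP
	 * decodes that as 2^(8+1) = 512 dwords.
	 */
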
	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
	if (use_doorbell)
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);
	}

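	/*
	 * The CP_HQD_DEQUEUE_REQUEST write above asks the CP to drain the
	 * queue; CP_HQD_ACTIVE is polled for up to adev->usec_timeout
	 * microseconds before the request and ring pointers are cleared.
	 */
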
	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, tmp);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, tmp);
	mqd->cp_hqd_pq_control = tmp;

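	/*
	 * QUEUE_SIZE is log2(ring dwords) - 1: e.g. a 64KB ring holds
	 * 16384 dwords, order_base_2(16384) - 1 = 13, which the CP
	 * decodes as 2^(13+1) = 16384 dwords.
	 */
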
	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

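	/*
	 * Doorbell indices count 64-bit doorbells, so a byte offset is
	 * (index * 2) << 2; the MEC range below spans the KIQ doorbell
	 * through the last MEC ring.
	 */
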
	/* enable the doorbell if requested */
	if (use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			     (AMDGPU_DOORBELL64_KIQ * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			     (AMDGPU_DOORBELL64_MEC_RING7 * 2) << 2);
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
		mqd->cp_hqd_pq_doorbell_control = tmp;
	} else {
		mqd->cp_hqd_pq_doorbell_control = 0;
	}
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, tmp);
	mqd->cp_hqd_persistent_state = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	if (use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};