/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23
24 #include <linux/delay.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_ucode.h"
31 #include "amdgpu_trace.h"
32
33 #include "sdma0/sdma0_4_2_offset.h"
34 #include "sdma0/sdma0_4_2_sh_mask.h"
35 #include "sdma1/sdma1_4_2_offset.h"
36 #include "sdma1/sdma1_4_2_sh_mask.h"
37 #include "sdma2/sdma2_4_2_2_offset.h"
38 #include "sdma2/sdma2_4_2_2_sh_mask.h"
39 #include "sdma3/sdma3_4_2_2_offset.h"
40 #include "sdma3/sdma3_4_2_2_sh_mask.h"
41 #include "sdma4/sdma4_4_2_2_offset.h"
42 #include "sdma4/sdma4_4_2_2_sh_mask.h"
43 #include "sdma5/sdma5_4_2_2_offset.h"
44 #include "sdma5/sdma5_4_2_2_sh_mask.h"
45 #include "sdma6/sdma6_4_2_2_offset.h"
46 #include "sdma6/sdma6_4_2_2_sh_mask.h"
47 #include "sdma7/sdma7_4_2_2_offset.h"
48 #include "sdma7/sdma7_4_2_2_sh_mask.h"
49 #include "hdp/hdp_4_0_offset.h"
50 #include "sdma0/sdma0_4_1_default.h"
51
52 #include "soc15_common.h"
53 #include "soc15.h"
54 #include "vega10_sdma_pkt_open.h"
55
56 #include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
57 #include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
58
59 #include "amdgpu_ras.h"
60
61 MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
62 MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
63 MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
64 MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
65 MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
66 MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
67 MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
68 MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
69 MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
70 MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
71 MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
72
73 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
74 #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
75
#define WREG32_SDMA(instance, offset, value) \
        WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
        RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
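/*
 * These helpers assume a local 'adev' variable is in scope at the
 * call site.  Illustrative expansion:
 *   WREG32_SDMA(0, mmSDMA0_GFX_RB_CNTL, v)
 *     -> WREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_GFX_RB_CNTL), v)
 */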

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);

static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};

static const struct soc15_reg_golden golden_settings_sdma_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};

static const struct soc15_reg_golden golden_settings_sdma_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};

static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};

static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
        { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
        0, 0,
        },
        { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
        0, 0,
        },
        { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
        0, 0,
        },
        { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
        0, 0,
        },
        { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
        0, 0,
        },
};

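/* Map an SDMA instance number and register offset to the absolute
 * register address of that instance's HWIP block; instances 2-7
 * index the second register segment.
 */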
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
                u32 instance, u32 offset)
{
        switch (instance) {
        case 0:
                return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
        case 1:
                return (adev->reg_offset[SDMA1_HWIP][0][0] + offset);
        case 2:
                return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
        case 3:
                return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
        case 4:
                return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
        case 5:
                return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
        case 6:
                return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
        case 7:
                return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
        default:
                break;
        }
        return 0;
}

static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
{
        switch (seq_num) {
        case 0:
                return SOC15_IH_CLIENTID_SDMA0;
        case 1:
                return SOC15_IH_CLIENTID_SDMA1;
        case 2:
                return SOC15_IH_CLIENTID_SDMA2;
        case 3:
                return SOC15_IH_CLIENTID_SDMA3;
        case 4:
                return SOC15_IH_CLIENTID_SDMA4;
        case 5:
                return SOC15_IH_CLIENTID_SDMA5;
        case 6:
                return SOC15_IH_CLIENTID_SDMA6;
        case 7:
                return SOC15_IH_CLIENTID_SDMA7;
        default:
                break;
        }
        return -EINVAL;
}

static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
{
        switch (client_id) {
        case SOC15_IH_CLIENTID_SDMA0:
                return 0;
        case SOC15_IH_CLIENTID_SDMA1:
                return 1;
        case SOC15_IH_CLIENTID_SDMA2:
                return 2;
        case SOC15_IH_CLIENTID_SDMA3:
                return 3;
        case SOC15_IH_CLIENTID_SDMA4:
                return 4;
        case SOC15_IH_CLIENTID_SDMA5:
                return 5;
        case SOC15_IH_CLIENTID_SDMA6:
                return 6;
        case SOC15_IH_CLIENTID_SDMA7:
                return 7;
        default:
                break;
        }
        return -EINVAL;
}

static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg10,
                                                ARRAY_SIZE(golden_settings_sdma_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg12,
                                                ARRAY_SIZE(golden_settings_sdma_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2_init,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2_init));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma1_4_2,
                                                ARRAY_SIZE(golden_settings_sdma1_4_2));
                break;
        case CHIP_ARCTURUS:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_arct,
                                                ARRAY_SIZE(golden_settings_sdma_arct));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_1,
                                                ARRAY_SIZE(golden_settings_sdma_4_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv2,
                                                        ARRAY_SIZE(golden_settings_sdma_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv1,
                                                        ARRAY_SIZE(golden_settings_sdma_rv1));
                break;
        case CHIP_RENOIR:
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_3,
                                                ARRAY_SIZE(golden_settings_sdma_4_3));
                break;
        default:
                break;
        }
}

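/**
 * sdma_v4_0_setup_ulv - program the ULV hysteresis
 *
 * @adev: amdgpu_device pointer
 *
 * Clear the ULV hysteresis on the server SKUs of VG10/VG20;
 * other chips are left untouched.
 */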
static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{
        int i;

        /*
         * The only chips with SDMAv4 and ULV are VG10 and VG20.
         * Server SKUs take a different hysteresis setting from other SKUs.
         */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                if (adev->pdev->device == 0x6860)
                        break;
                return;
        case CHIP_VEGA20:
                if (adev->pdev->device == 0x66a1)
                        break;
                return;
        default:
                return;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                uint32_t temp;

                temp = RREG32_SDMA(i, mmSDMA0_ULV_CNTL);
                temp = REG_SET_FIELD(temp, SDMA0_ULV_CNTL, HYSTERESIS, 0x0);
                WREG32_SDMA(i, mmSDMA0_ULV_CNTL, temp);
        }
}

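/**
 * sdma_v4_0_init_inst_ctx - set up one SDMA instance from its firmware
 *
 * @sdma_inst: SDMA instance context to initialize
 *
 * Validate the ucode image, cache the firmware and feature versions,
 * and enable burst NOPs when the firmware supports them.
 */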
static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
        int err = 0;
        const struct sdma_firmware_header_v1_0 *hdr;

        err = amdgpu_ucode_validate(sdma_inst->fw);
        if (err)
                return err;

        hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
        sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
        sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);

        if (sdma_inst->feature_version >= 20)
                sdma_inst->burst_nop = true;

        return 0;
}

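/**
 * sdma_v4_0_destroy_inst_ctx - tear down the SDMA instance contexts
 *
 * @adev: amdgpu_device pointer
 *
 * Release the firmware images and clear all per-instance state.
 */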
static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                release_firmware(adev->sdma.instance[i].fw);
                adev->sdma.instance[i].fw = NULL;

                /* arcturus shares the same FW memory across
                 * all SDMA instances */
                if (adev->asic_type == CHIP_ARCTURUS)
                        break;
        }

        memset((void *)adev->sdma.instance, 0,
                sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

/* Emulation only, won't work on a real chip:
 * a real vega10 chip needs to use PSP to load the firmware.
 */
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
        case CHIP_RENOIR:
                chip_name = "renoir";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);

        err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
        if (err)
                goto out;

        err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
        if (err)
                goto out;

        for (i = 1; i < adev->sdma.num_instances; i++) {
                if (adev->asic_type == CHIP_ARCTURUS) {
                        /* Arcturus shares the same FW memory
                         * across every SDMA instance */
                        memcpy((void *)&adev->sdma.instance[i],
                               (void *)&adev->sdma.instance[0],
                               sizeof(struct amdgpu_sdma_instance));
                } else {
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);

                        err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                        if (err)
                                goto out;

                        err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
                        if (err)
                                goto out;
                }
        }

        DRM_DEBUG("psp_load == '%s'\n",
                adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
                        info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }

out:
        if (err) {
                DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
                sdma_v4_0_destroy_inst_ctx(adev);
        }
        return err;
}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u64 *rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

        DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
        return ((*rptr) >> 2);
}

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
                DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
                                ring->me, wptr);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        DRM_DEBUG("Setting write pointer\n");
        if (ring->use_doorbell) {
                u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

                DRM_DEBUG("Using doorbell -- "
                                "wptr_offs == 0x%08x "
                                "lower_32_bits(ring->wptr) << 2 == 0x%08x "
                                "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                                ring->wptr_offs,
                                lower_32_bits(ring->wptr << 2),
                                upper_32_bits(ring->wptr << 2));
                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                                ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                DRM_DEBUG("Not using doorbell -- "
                                "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
                                "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                                ring->me,
                                lower_32_bits(ring->wptr << 2),
                                ring->me,
                                upper_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
                            lower_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
                            upper_32_bits(ring->wptr << 2));
        }
}

/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                uint64_t wptr = ring->wptr << 2;

                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
                            lower_32_bits(wptr));
                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
                            upper_32_bits(wptr));
        }
}

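/**
 * sdma_v4_0_ring_insert_nop - insert NOP packets into the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with NOPs; a single burst NOP header covers the
 * whole run when the firmware supports it.
 */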
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve the vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* IB packet must end on an 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}

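/*
 * sdma_v4_0_wait_reg_mem - emit a POLL_REGMEM packet
 *
 * Make the engine poll a register pair (or a memory location when
 * mem_space is set) until (value & mask) == ref, retrying at the
 * given poll interval; hdp marks the poll as an HDP-flush wait.
 */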
static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
                                   int mem_space, int hdp,
                                   uint32_t addr0, uint32_t addr1,
                                   uint32_t ref, uint32_t mask,
                                   uint32_t inv)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        if (mem_space) {
                /* memory */
                amdgpu_ring_write(ring, addr0);
                amdgpu_ring_write(ring, addr1);
        } else {
                /* registers */
                amdgpu_ring_write(ring, addr0 << 2);
                amdgpu_ring_write(ring, addr1 << 2);
        }
        amdgpu_ring_write(ring, ref); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

        sdma_v4_0_wait_reg_mem(ring, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               ref_and_mask, ref_and_mask, 10);
}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: fence sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        /* zero in first two bits */
        BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                /* zero in first two bits */
                BUG_ON(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v4_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
        int i, unset = 0;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                sdma[i] = &adev->sdma.instance[i].ring;

                if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
                        unset = 1;
                }

                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v4_0_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
        int i;
        bool unset = false;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                sdma[i] = &adev->sdma.instance[i].page;

                if ((adev->mman.buffer_funcs_ring == sdma[i]) && !unset) {
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
                        unset = true;
                }

                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_ctx_switch_enable - enable or disable context switching
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl, phase_quantum = 0;
        int i;

        if (amdgpu_sdma_phase_quantum) {
                unsigned value = amdgpu_sdma_phase_quantum;
                unsigned unit = 0;

                while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
                        value = (value + 1) >> 1;
                        unit++;
                }
                if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                            SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
                        value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
                        unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                                SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
                        WARN_ONCE(1,
                        "clamping sdma_phase_quantum to %uK clock cycles\n",
                                  value << unit);
                }
                phase_quantum =
                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
        }
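        /* The quantum is stored as a (VALUE, UNIT) pair, where the
         * effective quantum grows as value << unit; the loop above
         * rounds the requested value up into that representation.
         */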

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
                if (enable && amdgpu_sdma_phase_quantum) {
                        WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
                }
                WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
        }
}

/**
 * sdma_v4_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v4_0_gfx_stop(adev);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
        }
}

/**
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 *
 * @ring: amdgpu ring pointer
 * @rb_cntl: current value of the RB_CNTL register
 *
 * Fill in the ring buffer size and, on big-endian kernels,
 * the swap-enable bits, then return the updated value.
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
        /* Set ring buffer size in dwords */
        uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
        return rb_cntl;
}

1114 /**
1115  * sdma_v4_0_gfx_resume - setup and start the async dma engines
1116  *
1117  * @adev: amdgpu_device pointer
1118  * @i: instance to resume
1119  *
1120  * Set up the gfx DMA ring buffers and enable them (VEGA10).
1121  * Returns 0 for success, error for failure.
1122  */
1123 static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
1124 {
1125         struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
1126         u32 rb_cntl, ib_cntl, wptr_poll_cntl;
1127         u32 wb_offset;
1128         u32 doorbell;
1129         u32 doorbell_offset;
1130         u64 wptr_gpu_addr;
1131
1132         wb_offset = (ring->rptr_offs * 4);
1133
1134         rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
1135         rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
1136         WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
1137
1138         /* Initialize the ring buffer's read and write pointers */
1139         WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
1140         WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
1141         WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
1142         WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
1143
1144         /* set the wb address whether it's enabled or not */
1145         WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
1146                upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
1147         WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
1148                lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
1149
1150         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
1151                                 RPTR_WRITEBACK_ENABLE, 1);
1152
1153         WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
1154         WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
1155
1156         ring->wptr = 0;
1157
1158         /* before programing wptr to a less value, need set minor_ptr_update first */
1159         WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
1160
1161         doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
1162         doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
1163
1164         doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
1165                                  ring->use_doorbell);
1166         doorbell_offset = REG_SET_FIELD(doorbell_offset,
1167                                         SDMA0_GFX_DOORBELL_OFFSET,
1168                                         OFFSET, ring->doorbell_index);
1169         WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
1170         WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
1171
1172         sdma_v4_0_ring_set_wptr(ring);
1173
1174         /* set minor_ptr_update to 0 after wptr programed */
1175         WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
1176
1177         /* setup the wptr shadow polling */
1178         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1179         WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
1180                     lower_32_bits(wptr_gpu_addr));
1181         WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
1182                     upper_32_bits(wptr_gpu_addr));
1183         wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
1184         wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
1185                                        SDMA0_GFX_RB_WPTR_POLL_CNTL,
1186                                        F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
1187         WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
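        /* wptr polling lets the engine fetch the wptr from the shadow in
         * memory instead of relying on doorbell writes; it is only enabled
         * under SR-IOV here, where guest doorbell delivery is assumed to be
         * less reliable.
         */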
1188
1189         /* enable DMA RB */
1190         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
1191         WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
1192
1193         ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
1194         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
1195 #ifdef __BIG_ENDIAN
1196         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
1197 #endif
1198         /* enable DMA IBs */
1199         WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
1200
1201         ring->sched.ready = true;
1202 }
1203
1204 /**
1205  * sdma_v4_0_page_resume - setup and start the async dma engines
1206  *
1207  * @adev: amdgpu_device pointer
1208  * @i: instance to resume
1209  *
1210  * Set up the page DMA ring buffers and enable them (VEGA10).
1212  */
1213 static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
1214 {
1215         struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
1216         u32 rb_cntl, ib_cntl, wptr_poll_cntl;
1217         u32 wb_offset;
1218         u32 doorbell;
1219         u32 doorbell_offset;
1220         u64 wptr_gpu_addr;
1221
1222         wb_offset = (ring->rptr_offs * 4);
1223
1224         rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
1225         rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
1226         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1227
1228         /* Initialize the ring buffer's read and write pointers */
1229         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
1230         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
1231         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
1232         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
1233
1234         /* set the wb address whether it's enabled or not */
1235         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
1236                upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
1237         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
1238                lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
1239
1240         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
1241                                 RPTR_WRITEBACK_ENABLE, 1);
1242
1243         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
1244         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
1245
1246         ring->wptr = 0;
1247
1248         /* before programming wptr to a smaller value, set minor_ptr_update first */
1249         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
1250
1251         doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
1252         doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
1253
1254         doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
1255                                  ring->use_doorbell);
1256         doorbell_offset = REG_SET_FIELD(doorbell_offset,
1257                                         SDMA0_PAGE_DOORBELL_OFFSET,
1258                                         OFFSET, ring->doorbell_index);
1259         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
1260         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
1261
1262         /* paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
1263         sdma_v4_0_page_ring_set_wptr(ring);
1264
1265         /* set minor_ptr_update to 0 after wptr is programmed */
1266         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
1267
1268         /* setup the wptr shadow polling */
1269         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1270         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
1271                     lower_32_bits(wptr_gpu_addr));
1272         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
1273                     upper_32_bits(wptr_gpu_addr));
1274         wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
1275         wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
1276                                        SDMA0_PAGE_RB_WPTR_POLL_CNTL,
1277                                        F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
1278         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
1279
1280         /* enable DMA RB */
1281         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
1282         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1283
1284         ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
1285         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
1286 #ifdef __BIG_ENDIAN
1287         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
1288 #endif
1289         /* enable DMA IBs */
1290         WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
1291
1292         ring->sched.ready = true;
1293 }
1294
1295 static void
1296 sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
1297 {
1298         uint32_t def, data;
1299
1300         if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
1301                 /* enable idle interrupt */
1302                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1303                 data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1304
1305                 if (data != def)
1306                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1307         } else {
1308                 /* disable idle interrupt */
1309                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1310                 data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1311                 if (data != def)
1312                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1313         }
1314 }
1315
1316 static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
1317 {
1318         uint32_t def, data;
1319
1320         /* Enable HW based PG. */
1321         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1322         data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
1323         if (data != def)
1324                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1325
1326         /* enable interrupt */
1327         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1328         data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1329         if (data != def)
1330                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1331
1332         /* Configure hold time to filter invalid power on/off requests; use the default for now */
1333         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1334         data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
1335         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
1336         /* Configure switch time for hysteresis; use the default for now */
1337         data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
1338         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
1339         if (data != def)
1340                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1341 }
1342
1343 static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
1344 {
1345         if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
1346                 return;
1347
1348         switch (adev->asic_type) {
1349         case CHIP_RAVEN:
1350         case CHIP_RENOIR:
1351                 sdma_v4_1_init_power_gating(adev);
1352                 sdma_v4_1_update_power_gating(adev, true);
1353                 break;
1354         default:
1355                 break;
1356         }
1357 }
1358
1359 /**
1360  * sdma_v4_0_rlc_resume - setup and start the async dma engines
1361  *
1362  * @adev: amdgpu_device pointer
1363  *
1364  * Set up the compute DMA queues and enable them (VEGA10).
1365  * Returns 0 for success, error for failure.
1366  */
1367 static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
1368 {
1369         sdma_v4_0_init_pg(adev);
1370
1371         return 0;
1372 }
1373
1374 /**
1375  * sdma_v4_0_load_microcode - load the sDMA ME ucode
1376  *
1377  * @adev: amdgpu_device pointer
1378  *
1379  * Loads the sDMA0/1 ucode.
1380  * Returns 0 for success, -EINVAL if the ucode is not available.
1381  */
1382 static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
1383 {
1384         const struct sdma_firmware_header_v1_0 *hdr;
1385         const __le32 *fw_data;
1386         u32 fw_size;
1387         int i, j;
1388
1389         /* halt the MEs */
1390         sdma_v4_0_enable(adev, false);
1391
1392         for (i = 0; i < adev->sdma.num_instances; i++) {
1393                 if (!adev->sdma.instance[i].fw)
1394                         return -EINVAL;
1395
1396                 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
1397                 amdgpu_ucode_print_sdma_hdr(&hdr->header);
1398                 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1399
1400                 fw_data = (const __le32 *)
1401                         (adev->sdma.instance[i].fw->data +
1402                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1403
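                /* standard SDMA ucode load sequence: reset the instruction
                 * RAM address to 0, stream the words through the (presumably
                 * auto-incrementing) UCODE_DATA port, then leave the fw
                 * version in UCODE_ADDR.
                 */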
1404                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
1405
1406                 for (j = 0; j < fw_size; j++)
1407                         WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
1408                                     le32_to_cpup(fw_data++));
1409
1410                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
1411                             adev->sdma.instance[i].fw_version);
1412         }
1413
1414         return 0;
1415 }
1416
1417 /**
1418  * sdma_v4_0_start - setup and start the async dma engines
1419  *
1420  * @adev: amdgpu_device pointer
1421  *
1422  * Set up the DMA engines and enable them (VEGA10).
1423  * Returns 0 for success, error for failure.
1424  */
1425 static int sdma_v4_0_start(struct amdgpu_device *adev)
1426 {
1427         struct amdgpu_ring *ring;
1428         int i, r = 0;
1429
1430         if (amdgpu_sriov_vf(adev)) {
1431                 sdma_v4_0_ctx_switch_enable(adev, false);
1432                 sdma_v4_0_enable(adev, false);
1433         } else {
1434
1435                 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1436                         r = sdma_v4_0_load_microcode(adev);
1437                         if (r)
1438                                 return r;
1439                 }
1440
1441                 /* unhalt the MEs */
1442                 sdma_v4_0_enable(adev, true);
1443                 /* enable sdma ring preemption */
1444                 sdma_v4_0_ctx_switch_enable(adev, true);
1445         }
1446
1447         /* start the gfx rings and rlc compute queues */
1448         for (i = 0; i < adev->sdma.num_instances; i++) {
1449                 uint32_t temp;
1450
1451                 WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
1452                 sdma_v4_0_gfx_resume(adev, i);
1453                 if (adev->sdma.has_page_queue)
1454                         sdma_v4_0_page_resume(adev, i);
1455
1456                 /* always set the utc l1 enable flag to 1 */
1457                 temp = RREG32_SDMA(i, mmSDMA0_CNTL);
1458                 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
1459                 WREG32_SDMA(i, mmSDMA0_CNTL, temp);
1460
1461                 if (!amdgpu_sriov_vf(adev)) {
1462                         /* unhalt engine */
1463                         temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
1464                         temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
1465                         WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
1466                 }
1467         }
1468
1469         if (amdgpu_sriov_vf(adev)) {
1470                 sdma_v4_0_ctx_switch_enable(adev, true);
1471                 sdma_v4_0_enable(adev, true);
1472         } else {
1473                 r = sdma_v4_0_rlc_resume(adev);
1474                 if (r)
1475                         return r;
1476         }
1477
1478         for (i = 0; i < adev->sdma.num_instances; i++) {
1479                 ring = &adev->sdma.instance[i].ring;
1480
1481                 r = amdgpu_ring_test_helper(ring);
1482                 if (r)
1483                         return r;
1484
1485                 if (adev->sdma.has_page_queue) {
1486                         struct amdgpu_ring *page = &adev->sdma.instance[i].page;
1487
1488                         r = amdgpu_ring_test_helper(page);
1489                         if (r)
1490                                 return r;
1491
1492                         if (adev->mman.buffer_funcs_ring == page)
1493                                 amdgpu_ttm_set_buffer_funcs_status(adev, true);
1494                 }
1495
1496                 if (adev->mman.buffer_funcs_ring == ring)
1497                         amdgpu_ttm_set_buffer_funcs_status(adev, true);
1498         }
1499
1500         return r;
1501 }
1502
1503 /**
1504  * sdma_v4_0_ring_test_ring - simple async dma engine test
1505  *
1506  * @ring: amdgpu_ring structure holding ring information
1507  *
1508  * Test the DMA engine by using it to write a value
1509  * to memory (VEGA10).
1510  * Returns 0 for success, error for failure.
1511  */
1512 static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
1513 {
1514         struct amdgpu_device *adev = ring->adev;
1515         unsigned i;
1516         unsigned index;
1517         int r;
1518         u32 tmp;
1519         u64 gpu_addr;
1520
1521         r = amdgpu_device_wb_get(adev, &index);
1522         if (r)
1523                 return r;
1524
1525         gpu_addr = adev->wb.gpu_addr + (index * 4);
1526         tmp = 0xCAFEDEAD;
1527         adev->wb.wb[index] = cpu_to_le32(tmp);
1528
1529         r = amdgpu_ring_alloc(ring, 5);
1530         if (r)
1531                 goto error_free_wb;
1532
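        /* a minimal WRITE_LINEAR packet: header, 64-bit destination address,
         * dword count minus one (so 0 here means a single payload dword),
         * then the payload itself.
         */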
1533         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1534                           SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1535         amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1536         amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1537         amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1538         amdgpu_ring_write(ring, 0xDEADBEEF);
1539         amdgpu_ring_commit(ring);
1540
1541         for (i = 0; i < adev->usec_timeout; i++) {
1542                 tmp = le32_to_cpu(adev->wb.wb[index]);
1543                 if (tmp == 0xDEADBEEF)
1544                         break;
1545                 udelay(1);
1546         }
1547
1548         if (i >= adev->usec_timeout)
1549                 r = -ETIMEDOUT;
1550
1551 error_free_wb:
1552         amdgpu_device_wb_free(adev, index);
1553         return r;
1554 }
1555
1556 /**
1557  * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
1558  *
1559  * @ring: amdgpu_ring structure holding ring information
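 * @timeout: how long to wait for the IB test fence, in jiffies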
1560  *
1561  * Test a simple IB in the DMA ring (VEGA10).
1562  * Returns 0 on success, error on failure.
1563  */
1564 static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1565 {
1566         struct amdgpu_device *adev = ring->adev;
1567         struct amdgpu_ib ib;
1568         struct dma_fence *f = NULL;
1569         unsigned index;
1570         long r;
1571         u32 tmp = 0;
1572         u64 gpu_addr;
1573
1574         r = amdgpu_device_wb_get(adev, &index);
1575         if (r)
1576                 return r;
1577
1578         gpu_addr = adev->wb.gpu_addr + (index * 4);
1579         tmp = 0xCAFEDEAD;
1580         adev->wb.wb[index] = cpu_to_le32(tmp);
1581         memset(&ib, 0, sizeof(ib));
1582         r = amdgpu_ib_get(adev, NULL, 256,
1583                           AMDGPU_IB_POOL_DIRECT, &ib);
1584         if (r)
1585                 goto err0;
1586
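        /* same WRITE_LINEAR packet as the ring test, padded with NOPs so the
         * IB is a multiple of 8 dwords, matching what sdma_v4_0_ring_pad_ib
         * enforces.
         */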
1587         ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1588                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1589         ib.ptr[1] = lower_32_bits(gpu_addr);
1590         ib.ptr[2] = upper_32_bits(gpu_addr);
1591         ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1592         ib.ptr[4] = 0xDEADBEEF;
1593         ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1594         ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1595         ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1596         ib.length_dw = 8;
1597
1598         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1599         if (r)
1600                 goto err1;
1601
1602         r = dma_fence_wait_timeout(f, false, timeout);
1603         if (r == 0) {
1604                 r = -ETIMEDOUT;
1605                 goto err1;
1606         } else if (r < 0) {
1607                 goto err1;
1608         }
1609         tmp = le32_to_cpu(adev->wb.wb[index]);
1610         if (tmp == 0xDEADBEEF)
1611                 r = 0;
1612         else
1613                 r = -EINVAL;
1614
1615 err1:
1616         amdgpu_ib_free(adev, &ib, NULL);
1617         dma_fence_put(f);
1618 err0:
1619         amdgpu_device_wb_free(adev, index);
1620         return r;
1621 }
1622
1623
1624 /**
1625  * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
1626  *
1627  * @ib: indirect buffer to fill with commands
1628  * @pe: addr of the page entry
1629  * @src: src addr to copy from
1630  * @count: number of page entries to update
1631  *
1632  * Update PTEs by copying them from the GART using sDMA (VEGA10).
1633  */
1634 static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
1635                                   uint64_t pe, uint64_t src,
1636                                   unsigned count)
1637 {
1638         unsigned bytes = count * 8;
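        /* each GPU PTE is 8 bytes; the copy packet encodes the byte count
         * minus one */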
1639
1640         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1641                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1642         ib->ptr[ib->length_dw++] = bytes - 1;
1643         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1644         ib->ptr[ib->length_dw++] = lower_32_bits(src);
1645         ib->ptr[ib->length_dw++] = upper_32_bits(src);
1646         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1647         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1649 }
1650
1651 /**
1652  * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
1653  *
1654  * @ib: indirect buffer to fill with commands
1655  * @pe: addr of the page entry
1656  * @value: dst addr to write into pe
1657  * @count: number of page entries to update
1658  * @incr: increase next addr by incr bytes
1660  *
1661  * Update PTEs by writing them manually using sDMA (VEGA10).
1662  */
1663 static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1664                                    uint64_t value, unsigned count,
1665                                    uint32_t incr)
1666 {
1667         unsigned ndw = count * 2;
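        /* each 64-bit PTE value is emitted as two dwords */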
1668
1669         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1670                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1671         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1672         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1673         ib->ptr[ib->length_dw++] = ndw - 1;
1674         for (; ndw > 0; ndw -= 2) {
1675                 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1676                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1677                 value += incr;
1678         }
1679 }
1680
1681 /**
1682  * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
1683  *
1684  * @ib: indirect buffer to fill with commands
1685  * @pe: addr of the page entry
1686  * @addr: dst addr to write into pe
1687  * @count: number of page entries to update
1688  * @incr: increase next addr by incr bytes
1689  * @flags: access flags
1690  *
1691  * Update the page tables using sDMA (VEGA10).
1692  */
1693 static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1694                                      uint64_t pe,
1695                                      uint64_t addr, unsigned count,
1696                                      uint32_t incr, uint64_t flags)
1697 {
1698         /* for physically contiguous pages (vram) */
1699         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1700         ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1701         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1702         ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1703         ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1704         ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1705         ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1706         ib->ptr[ib->length_dw++] = incr; /* increment size */
1707         ib->ptr[ib->length_dw++] = 0;
1708         ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1709 }
1710
1711 /**
1712  * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
1713  *
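 * @ring: amdgpu_ring structure holding ring information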
1714  * @ib: indirect buffer to fill with padding
1715  *
1716  */
1717 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1718 {
1719         struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1720         u32 pad_count;
1721         int i;
1722
1723         pad_count = (-ib->length_dw) & 7;
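        /* pad to a multiple of 8 dwords; e.g. length_dw == 13 gives
         * pad_count == 3, rounding the IB up to 16 dwords.
         */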
1724         for (i = 0; i < pad_count; i++)
1725                 if (sdma && sdma->burst_nop && (i == 0))
1726                         ib->ptr[ib->length_dw++] =
1727                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1728                                 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1729                 else
1730                         ib->ptr[ib->length_dw++] =
1731                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1732 }
1733
1734
1735 /**
1736  * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
1737  *
1738  * @ring: amdgpu_ring pointer
1739  *
1740  * Make sure all previous operations are completed (VEGA10).
1741  */
1742 static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1743 {
1744         uint32_t seq = ring->fence_drv.sync_seq;
1745         uint64_t addr = ring->fence_drv.gpu_addr;
1746
1747         /* wait for idle */
1748         sdma_v4_0_wait_reg_mem(ring, 1, 0,
1749                                addr & 0xfffffffc,
1750                                upper_32_bits(addr) & 0xffffffff,
1751                                seq, 0xffffffff, 4);
1752 }
1753
1754
1755 /**
1756  * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
1757  *
1758  * @ring: amdgpu_ring pointer
1759  * @vmid: vmid number to use
1760  * @pd_addr: address of the page directory
1760  *
1761  * Update the page table base and flush the VM TLB
1762  * using sDMA (VEGA10).
1763  */
1764 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1765                                          unsigned vmid, uint64_t pd_addr)
1766 {
1767         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1768 }
1769
1770 static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
1771                                      uint32_t reg, uint32_t val)
1772 {
1773         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1774                           SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1775         amdgpu_ring_write(ring, reg);
1776         amdgpu_ring_write(ring, val);
1777 }
1778
1779 static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1780                                          uint32_t val, uint32_t mask)
1781 {
1782         sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1783 }
1784
1785 static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
1786 {
1787         uint fw_version = adev->sdma.instance[0].fw_version;
1788
1789         switch (adev->asic_type) {
1790         case CHIP_VEGA10:
1791                 return fw_version >= 430;
1792         case CHIP_VEGA12:
1793                 /*return fw_version >= 31;*/
1794                 return false;
1795         case CHIP_VEGA20:
1796                 return fw_version >= 123;
1797         default:
1798                 return false;
1799         }
1800 }
1801
1802 static int sdma_v4_0_early_init(void *handle)
1803 {
1804         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1805         int r;
1806
1807         if (adev->flags & AMD_IS_APU)
1808                 adev->sdma.num_instances = 1;
1809         else if (adev->asic_type == CHIP_ARCTURUS)
1810                 adev->sdma.num_instances = 8;
1811         else
1812                 adev->sdma.num_instances = 2;
1813
1814         r = sdma_v4_0_init_microcode(adev);
1815         if (r) {
1816                 DRM_ERROR("Failed to load sdma firmware!\n");
1817                 return r;
1818         }
1819
1820         /* TODO: Page queue breaks driver reload under SRIOV */
1821         if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf(adev))
1822                 adev->sdma.has_page_queue = false;
1823         else if (sdma_v4_0_fw_support_paging_queue(adev))
1824                 adev->sdma.has_page_queue = true;
1825
1826         sdma_v4_0_set_ring_funcs(adev);
1827         sdma_v4_0_set_buffer_funcs(adev);
1828         sdma_v4_0_set_vm_pte_funcs(adev);
1829         sdma_v4_0_set_irq_funcs(adev);
1830         sdma_v4_0_set_ras_funcs(adev);
1831
1832         return 0;
1833 }
1834
1835 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
1836                 void *err_data,
1837                 struct amdgpu_iv_entry *entry);
1838
1839 static int sdma_v4_0_late_init(void *handle)
1840 {
1841         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1842         struct ras_ih_if ih_info = {
1843                 .cb = sdma_v4_0_process_ras_data_cb,
1844         };
1845
1846         sdma_v4_0_setup_ulv(adev);
1847
1848         if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
1849                 adev->sdma.funcs->reset_ras_error_count(adev);
1850
1851         if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
1852                 return adev->sdma.funcs->ras_late_init(adev, &ih_info);
1853         else
1854                 return 0;
1855 }
1856
1857 static int sdma_v4_0_sw_init(void *handle)
1858 {
1859         struct amdgpu_ring *ring;
1860         int r, i;
1861         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1862
1863         /* SDMA trap event */
1864         for (i = 0; i < adev->sdma.num_instances; i++) {
1865                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1866                                       SDMA0_4_0__SRCID__SDMA_TRAP,
1867                                       &adev->sdma.trap_irq);
1868                 if (r)
1869                         return r;
1870         }
1871
1872         /* SDMA SRAM ECC event */
1873         for (i = 0; i < adev->sdma.num_instances; i++) {
1874                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1875                                       SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1876                                       &adev->sdma.ecc_irq);
1877                 if (r)
1878                         return r;
1879         }
1880
1881         for (i = 0; i < adev->sdma.num_instances; i++) {
1882                 ring = &adev->sdma.instance[i].ring;
1883                 ring->ring_obj = NULL;
1884                 ring->use_doorbell = true;
1885
1886                 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1887                                 ring->use_doorbell ? "true" : "false");
1888
1889                 /* doorbell size is 2 dwords, get DWORD offset */
1890                 ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1891
1892                 sprintf(ring->name, "sdma%d", i);
1893                 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1894                                      AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1895                                      AMDGPU_RING_PRIO_DEFAULT);
1896                 if (r)
1897                         return r;
1898
1899                 if (adev->sdma.has_page_queue) {
1900                         ring = &adev->sdma.instance[i].page;
1901                         ring->ring_obj = NULL;
1902                         ring->use_doorbell = true;
1903
1904                         /* paging queue uses the same doorbell index/routing as the gfx
1905                          * queue, with a 0x400 (1024 dword) offset on the second doorbell page
1906                          */
1907                         ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1908                         ring->doorbell_index += 0x400;
1909
1910                         sprintf(ring->name, "page%d", i);
1911                         r = amdgpu_ring_init(adev, ring, 1024,
1912                                              &adev->sdma.trap_irq,
1913                                              AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1914                                              AMDGPU_RING_PRIO_DEFAULT);
1915                         if (r)
1916                                 return r;
1917                 }
1918         }
1919
1920         return r;
1921 }
1922
1923 static int sdma_v4_0_sw_fini(void *handle)
1924 {
1925         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1926         int i;
1927
1928         if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
1929                 adev->sdma.funcs->ras_fini(adev);
1930
1931         for (i = 0; i < adev->sdma.num_instances; i++) {
1932                 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1933                 if (adev->sdma.has_page_queue)
1934                         amdgpu_ring_fini(&adev->sdma.instance[i].page);
1935         }
1936
1937         sdma_v4_0_destroy_inst_ctx(adev);
1938
1939         return 0;
1940 }
1941
1942 static int sdma_v4_0_hw_init(void *handle)
1943 {
1944         int r;
1945         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1946
1947         if (adev->flags & AMD_IS_APU)
1948                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
1949
1950         if (!amdgpu_sriov_vf(adev))
1951                 sdma_v4_0_init_golden_registers(adev);
1952
1953         r = sdma_v4_0_start(adev);
1954
1955         return r;
1956 }
1957
1958 static int sdma_v4_0_hw_fini(void *handle)
1959 {
1960         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1961         int i;
1962
1963         if (amdgpu_sriov_vf(adev))
1964                 return 0;
1965
1966         for (i = 0; i < adev->sdma.num_instances; i++) {
1967                 amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1968                                AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1969         }
1970
1971         sdma_v4_0_ctx_switch_enable(adev, false);
1972         sdma_v4_0_enable(adev, false);
1973
1974         if (adev->flags & AMD_IS_APU)
1975                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
1976
1977         return 0;
1978 }
1979
1980 static int sdma_v4_0_suspend(void *handle)
1981 {
1982         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1983
1984         return sdma_v4_0_hw_fini(adev);
1985 }
1986
1987 static int sdma_v4_0_resume(void *handle)
1988 {
1989         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1990
1991         return sdma_v4_0_hw_init(adev);
1992 }
1993
1994 static bool sdma_v4_0_is_idle(void *handle)
1995 {
1996         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997         u32 i;
1998
1999         for (i = 0; i < adev->sdma.num_instances; i++) {
2000                 u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
2001
2002                 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
2003                         return false;
2004         }
2005
2006         return true;
2007 }
2008
2009 static int sdma_v4_0_wait_for_idle(void *handle)
2010 {
2011         unsigned i, j;
2012         u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
2013         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2014
2015         for (i = 0; i < adev->usec_timeout; i++) {
2016                 for (j = 0; j < adev->sdma.num_instances; j++) {
2017                         sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
2018                         if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
2019                                 break;
2020                 }
2021                 if (j == adev->sdma.num_instances)
2022                         return 0;
2023                 udelay(1);
2024         }
2025         return -ETIMEDOUT;
2026 }
2027
2028 static int sdma_v4_0_soft_reset(void *handle)
2029 {
2030         /* todo */
2031
2032         return 0;
2033 }
2034
2035 static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
2036                                         struct amdgpu_irq_src *source,
2037                                         unsigned type,
2038                                         enum amdgpu_interrupt_state state)
2039 {
2040         u32 sdma_cntl;
2041
2042         sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
2043         sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
2044                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2045         WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
2046
2047         return 0;
2048 }
2049
2050 static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
2051                                       struct amdgpu_irq_src *source,
2052                                       struct amdgpu_iv_entry *entry)
2053 {
2054         uint32_t instance;
2055
2056         DRM_DEBUG("IH: SDMA trap\n");
2057         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2058         switch (entry->ring_id) {
2059         case 0:
2060                 amdgpu_fence_process(&adev->sdma.instance[instance].ring);
2061                 break;
2062         case 1:
2063                 if (adev->asic_type == CHIP_VEGA20)
2064                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2065                 break;
2066         case 2:
2067                 /* XXX compute */
2068                 break;
2069         case 3:
2070                 if (adev->asic_type != CHIP_VEGA20)
2071                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2072                 break;
2073         }
2074         return 0;
2075 }
2076
2077 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
2078                 void *err_data,
2079                 struct amdgpu_iv_entry *entry)
2080 {
2081         int instance;
2082
2083         /* When "Full RAS" is enabled, the per-IP interrupt sources should
2084          * be disabled and the driver should only look for the aggregated
2085          * interrupt via sync flood
2086          */
2087         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
2088                 goto out;
2089
2090         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2091         if (instance < 0)
2092                 goto out;
2093
2094         amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
2095
2096 out:
2097         return AMDGPU_RAS_SUCCESS;
2098 }
2099
2100 static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
2101                                               struct amdgpu_irq_src *source,
2102                                               struct amdgpu_iv_entry *entry)
2103 {
2104         int instance;
2105
2106         DRM_ERROR("Illegal instruction in SDMA command stream\n");
2107
2108         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2109         if (instance < 0)
2110                 return 0;
2111
2112         switch (entry->ring_id) {
2113         case 0:
2114                 drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
2115                 break;
2116         }
2117         return 0;
2118 }
2119
2120 static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
2121                                         struct amdgpu_irq_src *source,
2122                                         unsigned type,
2123                                         enum amdgpu_interrupt_state state)
2124 {
2125         u32 sdma_edc_config;
2126
2127         sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG);
2128         sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
2129                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2130         WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config);
2131
2132         return 0;
2133 }
2134
2135 static void sdma_v4_0_update_medium_grain_clock_gating(
2136                 struct amdgpu_device *adev,
2137                 bool enable)
2138 {
2139         uint32_t data, def;
2140         int i;
2141
2142         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
2143                 for (i = 0; i < adev->sdma.num_instances; i++) {
2144                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2145                         data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2146                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2147                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2148                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2149                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2150                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2151                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2152                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2153                         if (def != data)
2154                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2155                 }
2156         } else {
2157                 for (i = 0; i < adev->sdma.num_instances; i++) {
2158                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2159                         data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2160                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2161                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2162                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2163                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2164                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2165                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2166                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2167                         if (def != data)
2168                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2169                 }
2170         }
2171 }
2172
2173
2174 static void sdma_v4_0_update_medium_grain_light_sleep(
2175                 struct amdgpu_device *adev,
2176                 bool enable)
2177 {
2178         uint32_t data, def;
2179         int i;
2180
2181         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
2182                 for (i = 0; i < adev->sdma.num_instances; i++) {
2183                         /* 1-not override: enable sdma mem light sleep */
2184                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2185                         data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2186                         if (def != data)
2187                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2188                 }
2189         } else {
2190                 for (i = 0; i < adev->sdma.num_instances; i++) {
2191                         /* 0-override: disable sdma mem light sleep */
2192                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2193                         data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2194                         if (def != data)
2195                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2196                 }
2197         }
2198 }
2199
2200 static int sdma_v4_0_set_clockgating_state(void *handle,
2201                                           enum amd_clockgating_state state)
2202 {
2203         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2204
2205         if (amdgpu_sriov_vf(adev))
2206                 return 0;
2207
2208         switch (adev->asic_type) {
2209         case CHIP_VEGA10:
2210         case CHIP_VEGA12:
2211         case CHIP_VEGA20:
2212         case CHIP_RAVEN:
2213         case CHIP_ARCTURUS:
2214         case CHIP_RENOIR:
2215                 sdma_v4_0_update_medium_grain_clock_gating(adev,
2216                                 state == AMD_CG_STATE_GATE);
2217                 sdma_v4_0_update_medium_grain_light_sleep(adev,
2218                                 state == AMD_CG_STATE_GATE);
2219                 break;
2220         default:
2221                 break;
2222         }
2223         return 0;
2224 }
2225
2226 static int sdma_v4_0_set_powergating_state(void *handle,
2227                                           enum amd_powergating_state state)
2228 {
2229         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2230
2231         switch (adev->asic_type) {
2232         case CHIP_RAVEN:
2233         case CHIP_RENOIR:
2234                 sdma_v4_1_update_power_gating(adev,
2235                                 state == AMD_PG_STATE_GATE);
2236                 break;
2237         default:
2238                 break;
2239         }
2240
2241         return 0;
2242 }
2243
2244 static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
2245 {
2246         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2247         int data;
2248
2249         if (amdgpu_sriov_vf(adev))
2250                 *flags = 0;
2251
2252         /* AMD_CG_SUPPORT_SDMA_MGCG */
2253         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
2254         if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
2255                 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
2256
2257         /* AMD_CG_SUPPORT_SDMA_LS */
2258         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
2259         if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
2260                 *flags |= AMD_CG_SUPPORT_SDMA_LS;
2261 }
2262
2263 const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
2264         .name = "sdma_v4_0",
2265         .early_init = sdma_v4_0_early_init,
2266         .late_init = sdma_v4_0_late_init,
2267         .sw_init = sdma_v4_0_sw_init,
2268         .sw_fini = sdma_v4_0_sw_fini,
2269         .hw_init = sdma_v4_0_hw_init,
2270         .hw_fini = sdma_v4_0_hw_fini,
2271         .suspend = sdma_v4_0_suspend,
2272         .resume = sdma_v4_0_resume,
2273         .is_idle = sdma_v4_0_is_idle,
2274         .wait_for_idle = sdma_v4_0_wait_for_idle,
2275         .soft_reset = sdma_v4_0_soft_reset,
2276         .set_clockgating_state = sdma_v4_0_set_clockgating_state,
2277         .set_powergating_state = sdma_v4_0_set_powergating_state,
2278         .get_clockgating_state = sdma_v4_0_get_clockgating_state,
2279 };
2280
2281 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
2282         .type = AMDGPU_RING_TYPE_SDMA,
2283         .align_mask = 0xf,
2284         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2285         .support_64bit_ptrs = true,
2286         .vmhub = AMDGPU_MMHUB_0,
2287         .get_rptr = sdma_v4_0_ring_get_rptr,
2288         .get_wptr = sdma_v4_0_ring_get_wptr,
2289         .set_wptr = sdma_v4_0_ring_set_wptr,
2290         .emit_frame_size =
2291                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2292                 3 + /* hdp invalidate */
2293                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2294                 /* sdma_v4_0_ring_emit_vm_flush */
2295                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2296                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2297                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2298         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2299         .emit_ib = sdma_v4_0_ring_emit_ib,
2300         .emit_fence = sdma_v4_0_ring_emit_fence,
2301         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2302         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2303         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2304         .test_ring = sdma_v4_0_ring_test_ring,
2305         .test_ib = sdma_v4_0_ring_test_ib,
2306         .insert_nop = sdma_v4_0_ring_insert_nop,
2307         .pad_ib = sdma_v4_0_ring_pad_ib,
2308         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2309         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2310         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2311 };
2312
2313 /*
2314  * On Arcturus, SDMA instances 5~7 use a different vmhub type (AMDGPU_MMHUB_1),
2315  * so create individual constant ring_funcs for those instances.
2316  */
2317 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
2318         .type = AMDGPU_RING_TYPE_SDMA,
2319         .align_mask = 0xf,
2320         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2321         .support_64bit_ptrs = true,
2322         .vmhub = AMDGPU_MMHUB_1,
2323         .get_rptr = sdma_v4_0_ring_get_rptr,
2324         .get_wptr = sdma_v4_0_ring_get_wptr,
2325         .set_wptr = sdma_v4_0_ring_set_wptr,
2326         .emit_frame_size =
2327                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2328                 3 + /* hdp invalidate */
2329                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2330                 /* sdma_v4_0_ring_emit_vm_flush */
2331                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2332                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2333                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2334         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2335         .emit_ib = sdma_v4_0_ring_emit_ib,
2336         .emit_fence = sdma_v4_0_ring_emit_fence,
2337         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2338         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2339         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2340         .test_ring = sdma_v4_0_ring_test_ring,
2341         .test_ib = sdma_v4_0_ring_test_ib,
2342         .insert_nop = sdma_v4_0_ring_insert_nop,
2343         .pad_ib = sdma_v4_0_ring_pad_ib,
2344         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2345         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2346         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2347 };
2348
2349 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
2350         .type = AMDGPU_RING_TYPE_SDMA,
2351         .align_mask = 0xf,
2352         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2353         .support_64bit_ptrs = true,
2354         .vmhub = AMDGPU_MMHUB_0,
2355         .get_rptr = sdma_v4_0_ring_get_rptr,
2356         .get_wptr = sdma_v4_0_page_ring_get_wptr,
2357         .set_wptr = sdma_v4_0_page_ring_set_wptr,
2358         .emit_frame_size =
2359                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2360                 3 + /* hdp invalidate */
2361                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2362                 /* sdma_v4_0_ring_emit_vm_flush */
2363                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2364                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2365                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2366         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2367         .emit_ib = sdma_v4_0_ring_emit_ib,
2368         .emit_fence = sdma_v4_0_ring_emit_fence,
2369         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2370         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2371         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2372         .test_ring = sdma_v4_0_ring_test_ring,
2373         .test_ib = sdma_v4_0_ring_test_ib,
2374         .insert_nop = sdma_v4_0_ring_insert_nop,
2375         .pad_ib = sdma_v4_0_ring_pad_ib,
2376         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2377         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2378         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2379 };
2380
2381 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
2382         .type = AMDGPU_RING_TYPE_SDMA,
2383         .align_mask = 0xf,
2384         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2385         .support_64bit_ptrs = true,
2386         .vmhub = AMDGPU_MMHUB_1,
2387         .get_rptr = sdma_v4_0_ring_get_rptr,
2388         .get_wptr = sdma_v4_0_page_ring_get_wptr,
2389         .set_wptr = sdma_v4_0_page_ring_set_wptr,
2390         .emit_frame_size =
2391                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2392                 3 + /* hdp invalidate */
2393                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2394                 /* sdma_v4_0_ring_emit_vm_flush */
2395                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2396                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2397                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2398         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2399         .emit_ib = sdma_v4_0_ring_emit_ib,
2400         .emit_fence = sdma_v4_0_ring_emit_fence,
2401         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2402         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2403         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2404         .test_ring = sdma_v4_0_ring_test_ring,
2405         .test_ib = sdma_v4_0_ring_test_ib,
2406         .insert_nop = sdma_v4_0_ring_insert_nop,
2407         .pad_ib = sdma_v4_0_ring_pad_ib,
2408         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2409         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2410         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2411 };
2412
2413 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
2414 {
2415         int i;
2416
2417         for (i = 0; i < adev->sdma.num_instances; i++) {
2418                 if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
2419                         adev->sdma.instance[i].ring.funcs =
2420                                         &sdma_v4_0_ring_funcs_2nd_mmhub;
2421                 else
2422                         adev->sdma.instance[i].ring.funcs =
2423                                         &sdma_v4_0_ring_funcs;
2424                 adev->sdma.instance[i].ring.me = i;
2425                 if (adev->sdma.has_page_queue) {
2426                         if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
2427                                 adev->sdma.instance[i].page.funcs =
2428                                         &sdma_v4_0_page_ring_funcs_2nd_mmhub;
2429                         else
2430                                 adev->sdma.instance[i].page.funcs =
2431                                         &sdma_v4_0_page_ring_funcs;
2432                         adev->sdma.instance[i].page.me = i;
2433                 }
2434         }
2435 }
2436
2437 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
2438         .set = sdma_v4_0_set_trap_irq_state,
2439         .process = sdma_v4_0_process_trap_irq,
2440 };
2441
2442 static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
2443         .process = sdma_v4_0_process_illegal_inst_irq,
2444 };
2445
2446 static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
2447         .set = sdma_v4_0_set_ecc_irq_state,
2448         .process = amdgpu_sdma_process_ecc_irq,
2449 };
2450
2453 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
2454 {
2455         switch (adev->sdma.num_instances) {
2456         case 1:
2457                 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
2458                 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
2459                 break;
2460         case 8:
2461                 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
2462                 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
2463                 break;
2464         case 2:
2465         default:
2466                 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
2467                 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
2468                 break;
2469         }
2470         adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
2471         adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
2472         adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
2473 }
2474
2475 /**
2476  * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
2477  *
2478  * @ib: indirect buffer to fill with commands
2479  * @src_offset: src GPU address
2480  * @dst_offset: dst GPU address
2481  * @byte_count: number of bytes to xfer
2482  * @tmz: if a secure copy should be used
2482  *
2483  * Copy GPU buffers using the DMA engine (VEGA10/12).
2484  * Used by the amdgpu ttm implementation to move pages if
2485  * registered as the asic copy callback.
2486  */
2487 static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
2488                                        uint64_t src_offset,
2489                                        uint64_t dst_offset,
2490                                        uint32_t byte_count,
2491                                        bool tmz)
2492 {
2493         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2494                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2495                 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
2496         ib->ptr[ib->length_dw++] = byte_count - 1;
2497         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2498         ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2499         ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2500         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2501         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2502 }
2503
2504 /**
2505  * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
2506  *
2507  * @ib: indirect buffer to fill with commands
2508  * @src_data: value to write to buffer
2509  * @dst_offset: dst GPU address
2510  * @byte_count: number of bytes to xfer
2511  *
2512  * Fill GPU buffers using the DMA engine (VEGA10/12).
2513  */
2514 static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
2515                                        uint32_t src_data,
2516                                        uint64_t dst_offset,
2517                                        uint32_t byte_count)
2518 {
2519         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2520         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2521         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2522         ib->ptr[ib->length_dw++] = src_data;
2523         ib->ptr[ib->length_dw++] = byte_count - 1;
2524 }
2525
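/*
 * copy_num_dw and fill_num_dw must match the dword counts emitted by
 * sdma_v4_0_emit_copy_buffer() (7 dwords) and sdma_v4_0_emit_fill_buffer()
 * (5 dwords). The 0x400000 byte (4 MiB) caps presumably reflect the packets'
 * count fields, which hold byte_count - 1.
 */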
2526 static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
2527         .copy_max_bytes = 0x400000,
2528         .copy_num_dw = 7,
2529         .emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
2530
2531         .fill_max_bytes = 0x400000,
2532         .fill_num_dw = 5,
2533         .emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
2534 };
2535
2536 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
2537 {
2538         adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
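        /* prefer the dedicated paging queue for TTM buffer moves when
         * available, presumably to keep the gfx queue free for other
         * submissions */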
2539         if (adev->sdma.has_page_queue)
2540                 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
2541         else
2542                 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2543 }
2544
2545 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
2546         .copy_pte_num_dw = 7,
2547         .copy_pte = sdma_v4_0_vm_copy_pte,
2548
2549         .write_pte = sdma_v4_0_vm_write_pte,
2550         .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
2551 };
2552
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        struct drm_gpu_scheduler *sched;
        unsigned i;

        adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (adev->sdma.has_page_queue)
                        sched = &adev->sdma.instance[i].page.sched;
                else
                        sched = &adev->sdma.instance[i].ring.sched;
                adev->vm_manager.vm_pte_scheds[i] = sched;
        }
        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

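/* Decode the per-sub-block single-error-detect (SED) counts packed
 * into one SDMA_EDC_COUNTER value, using the mask/shift table shared
 * by all SDMA instances, and accumulate them into *sec_count.
 */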
static void sdma_v4_0_get_ras_error_count(uint32_t value,
                                        uint32_t instance,
                                        uint32_t *sec_count)
{
        uint32_t i;
        uint32_t sec_cnt;

        /* double bit (multi bit) error detection is not supported */
        for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
                /* the SDMA_EDC_COUNTER register in each sdma instance
                 * shares the same sed shift_mask
                 */
                sec_cnt = (value &
                        sdma_v4_0_ras_fields[i].sec_count_mask) >>
                        sdma_v4_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
                        DRM_INFO("Detected %s in SDMA%d, SED %d\n",
                                sdma_v4_0_ras_fields[i].name,
                                instance, sec_cnt);
                        *sec_count += sec_cnt;
                }
        }
}

static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
                        uint32_t instance, void *ras_error_status)
{
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0;
        uint32_t reg_value = 0;

        reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
        /* double bit error is not supported */
        if (reg_value)
                sdma_v4_0_get_ras_error_count(reg_value,
                                instance, &sec_count);
        /* err_data->ce_count should be initialized to 0
         * before calling into this function
         */
        err_data->ce_count += sec_count;
        /* double bit error is not supported,
         * so set the ue count to 0
         */
        err_data->ue_count = 0;

        return 0;
}

static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
        int i;

        /* read back edc counter registers to clear the counters */
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
                for (i = 0; i < adev->sdma.num_instances; i++)
                        RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
        }
}

static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
        .ras_late_init = amdgpu_sdma_ras_late_init,
        .ras_fini = amdgpu_sdma_ras_fini,
        .query_ras_error_count = sdma_v4_0_query_ras_error_count,
        .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};

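/* RAS hooks are only wired up on the ASICs with SDMA RAS support
 * (VEGA20 and Arcturus); all other chips fall through and leave the
 * funcs pointer untouched.
 */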
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA20:
        case CHIP_ARCTURUS:
                adev->sdma.funcs = &sdma_v4_0_ras_funcs;
                break;
        default:
                break;
        }
}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 4,
        .minor = 0,
        .rev = 0,
        .funcs = &sdma_v4_0_ip_funcs,
};