// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/devfreq.h>

#define GPU_PAS_ID 13

static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}

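/*
 * Note on the flush path: the CP_RB_WPTR write is the only "doorbell" the CP
 * sees, so all ring contents written under the ring lock must be visible to
 * the GPU before it lands - hence the full memory barrier above it.
 */
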
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |
		CP_REG_TO_MEM_0_64B);
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}

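/*
 * get_stats_counter() above asks the CP itself to copy a register pair into
 * memory: CP_REG_TO_MEM_0_CNT(2) reads two consecutive 32-bit registers (the
 * _LO/_HI halves of a counter) and CP_REG_TO_MEM_0_64B marks the destination
 * address as 64 bits wide. Because the copy executes in command-stream order,
 * the snapshot lands exactly at submit start/end rather than whenever the CPU
 * happens to read the register.
 */
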
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));

	a6xx_flush(gpu, ring);
}

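/*
 * Summary of the submit layout built above: stats snapshot (start), CCU
 * invalidates, the user's IBs, stats snapshot (end), a CPU-visible fence
 * write to a scratch register, and finally CACHE_FLUSH_TS with an IRQ. The
 * resulting CP_CACHE_FLUSH_TS interrupt is what drives retirement in
 * a6xx_irq().
 */
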
const struct adreno_reglist a630_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};

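/*
 * Each hwcg table is terminated by a zeroed sentinel entry: a6xx_set_hwcg()
 * below walks the list until it hits an entry with offset == 0, so the
 * terminator is load-bearing and must stay at the end of every table here.
 */
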
const struct adreno_reglist a640_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};

const struct adreno_reglist a650_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
	{},
};

static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	const struct adreno_reglist *reg;
	unsigned int i;
	u32 val, clock_cntl_on;

	if (!adreno_gpu->info->hwcg)
		return;

	if (adreno_is_a630(adreno_gpu))
		clock_cntl_on = 0x8aa8aa02;
	else
		clock_cntl_on = 0x8aa8aa82;

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == clock_cntl_on)))
		return;

	/* Disable SP clock before programming HWCG registers */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);

	/* Enable SP clock */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}

static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 lower_bit = 2;
	u32 amsbc = 0;
	u32 rgb565_predicator = 0;
	u32 uavflagprd_inv = 0;

	/* a618 is using the hw default values */
	if (adreno_is_a618(adreno_gpu))
		return;

	if (adreno_is_a640(adreno_gpu))
		amsbc = 1;

	if (adreno_is_a650(adreno_gpu)) {
		amsbc = 1;
		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
		lower_bit = 3;
		rgb565_predicator = 1;
		uavflagprd_inv = 2;
	}

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
		uavflagprd_inv << 4 | lower_bit << 1);
	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}

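/*
 * A rough map of the NC_MODE_CNTL fields programmed above, inferred from how
 * the values are shifted rather than from documentation: bit 11 enables the
 * RGB565 predicator, bit 4 enables AMSBC compression, and the bits starting
 * at 1 carry the UBWC "lower bit" (bank swizzle) setting that has to match
 * the DDR configuration - hence the TODO about querying the bootloader for
 * the DDR type.
 */
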
static int a6xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002f);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* No workarounds enabled */
	OUT_RING(ring, 0x00000000);

	/* Pad rest of the cmds with 0's */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

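/*
 * CP_ME_INIT is expected to be the first packet the CP consumes after the
 * SQE is started, which is why a6xx_hw_init() calls this right after writing
 * CP_SQE_CNTL. The a6xx_idle() above doubles as confirmation that the
 * microcode actually accepted the init sequence.
 */
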
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
	}

	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
		REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);

	return 0;
}

static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;
	return ret;
}

#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	A6XX_RBBM_INT_0_MASK_CP_RB | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

static int a6xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	/* Make sure the GMU keeps the GPU on while we set it up */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Turn on 64 bit addressing for all blocks */
	gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/* enable hardware clockgating */
	a6xx_set_hwcg(gpu, true);

	if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
	} else {
		gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
	}

	if (adreno_is_a630(adreno_gpu))
		gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	if (!adreno_is_a650(adreno_gpu)) {
		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
			REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);

		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
			REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
			0x00100000 + adreno_gpu->gmem - 1);
	}

	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);

	if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
	else
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);

	/* Setting the mem pool size */
	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	if (adreno_is_a650(adreno_gpu))
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
	else if (adreno_is_a640(adreno_gpu))
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
	else
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

	a6xx_set_ubwc_config(gpu);

	/* Enable fault detection */
	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0x1fffff);

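	/*
	 * The exact split of INTERFACE_HANG_INT_CNTL is not spelled out here,
	 * but by analogy with earlier Adrenos bit 30 enables the hang
	 * interrupt and the low bits set the countdown threshold (in cycles)
	 * before a stalled client trips RBBM_HANG_DETECT.
	 */
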
	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);

	/* Set weights for bicubic filtering */
	if (adreno_is_a650(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
			0x3fe05ff4);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
			0x3fa0ebee);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
			0x3f5193ed);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
			0x3f0243f0);
	}

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);

	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
		A6XX_PROTECT_RDONLY(0x600, 0x51));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
		A6XX_PROTECT_RDONLY(0xfc00, 0x3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
		A6XX_PROTECT_RDONLY(0x0, 0x4f9));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
		A6XX_PROTECT_RDONLY(0x501, 0xa));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
		A6XX_PROTECT_RDONLY(0x511, 0x44));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
		A6XX_PROTECT_RW(0xbe20, 0x11f3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
		A6XX_PROTECT_RDONLY(0x980, 0x4));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));

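	/*
	 * Each CP_PROTECT entry encodes a base register and a range length;
	 * as the names suggest, A6XX_PROTECT_RDONLY() appears to leave the
	 * range readable by the CP while A6XX_PROTECT_RW() traps accesses in
	 * both directions. A violating packet raises
	 * CP_REGISTER_PROTECTION_ERROR, which a6xx_cp_hw_err_irq() decodes
	 * back into the offending address and access type.
	 */
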
	/* Enable expanded apriv for targets that support it */
	if (gpu->hw_apriv) {
		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
			(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
	}

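	/*
	 * The individual CP_APRIV_CNTL bits set above are not named here; on
	 * targets with hw_apriv (a650 and later) they presumably mark trusted
	 * CP accesses (ucode fetch, ringbuffer reads and the like) as
	 * privileged, which is what "expanded apriv" refers to.
	 */
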
	/* Enable interrupts */
	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		goto out;

	ret = a6xx_ucode_init(gpu);
	if (ret)
		goto out;

	/* Always come up on rb 0 */
	a6xx_gpu->cur_ring = gpu->rb[0];

	/* Enable the SQE to start the CP engine */
	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_cp_init(gpu);
	if (ret)
		goto out;

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a6xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb[0], 0x00000000);

		a6xx_flush(gpu, gpu->rb[0]);
		if (!a6xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	} else if (ret == -ENODEV) {
		/*
		 * This device does not use zap shader (but print a warning
		 * just in case someone got their dt wrong.. hopefully they
		 * have a debug UART to realize the error of their ways...
		 * if you mess this up you are about to crash horribly)
		 */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
		ret = 0;
	} else {
		return ret;
	}

out:
	/*
	 * Tell the GMU that we are done touching the GPU and it can start power
	 * management
	 */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	if (a6xx_gpu->gmu.legacy) {
		/* Take the GMU out of its special boot mode */
		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
	}

	return ret;
}

static void a6xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
			gpu_read(gpu, REG_A6XX_RBBM_STATUS));
	adreno_dump(gpu);
}

#define VBIF_RESET_ACK_TIMEOUT	100
#define VBIF_RESET_ACK_MASK	0x00f0

static void a6xx_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++)
		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

	if (hang_debug)
		a6xx_dump(gpu);

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * interrupt
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	msm_gpu_hw_init(gpu);
}

static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);

	if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
		val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A6XX_CP_INT_CP_UCODE_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP ucode error interrupt\n");

	if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A6XX_CP_HW_FAULT));

	if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 20) ? "READ" : "WRITE",
			(val & 0x3ffff), val);
	}

	if (status & A6XX_CP_INT_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");

	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");

	if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}

static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	/*
	 * Force the GPU to stay on until after we finish
	 * collecting information
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);

	DRM_DEV_ERROR(&gpu->pdev->dev,
		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->seqno : 0,
		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}

static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);

	gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
		a6xx_fault_detect_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a6xx_cp_hw_err_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
		REG_A6XX_CP_RB_RPTR_ADDR_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A6XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};

static int a6xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	gpu->needs_hw_init = true;

	ret = a6xx_gmu_resume(a6xx_gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	return 0;
}

static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	return a6xx_gmu_stop(a6xx_gpu);
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Force the GPU power on so we can read this register */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		REG_A6XX_RBBM_PERFCTR_CP_0_HI);

	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	return 0;
}

static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	return a6xx_gpu->cur_ring;
}

static void a6xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (a6xx_gpu->sqe_bo) {
		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
		drm_gem_object_put(a6xx_gpu->sqe_bo);
	}

	a6xx_gmu_remove(a6xx_gpu);

	adreno_gpu_cleanup(adreno_gpu);

	kfree(a6xx_gpu);
}

static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	u64 busy_cycles, busy_time;

	/* Only read the gpu busy if the hardware is already active */
	if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
		return 0;

	busy_cycles = gmu_read64(&a6xx_gpu->gmu,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);

	busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
	do_div(busy_time, 192);

	gpu->devfreq.busy_cycles = busy_cycles;

	pm_runtime_put(a6xx_gpu->gmu.dev);

	if (WARN_ON(busy_time > ~0LU))
		return ~0LU;

	return (unsigned long)busy_time;
}

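/*
 * The arithmetic above converts always-on counter ticks to busy time: XOCLK
 * is assumed to run at the SoC's 19.2 MHz always-on clock, so
 * cycles * 10 / 192 yields microseconds, which is what the devfreq governor
 * expects from gpu_busy().
 */
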
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a6xx_hw_init,
		.pm_suspend = a6xx_pm_suspend,
		.pm_resume = a6xx_pm_resume,
		.recover = a6xx_recover,
		.submit = a6xx_submit,
		.flush = a6xx_flush,
		.active_ring = a6xx_active_ring,
		.irq = a6xx_irq,
		.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.show = a6xx_show,
#endif
		.gpu_busy = a6xx_gpu_busy,
		.gpu_get_freq = a6xx_gmu_get_freq,
		.gpu_set_freq = a6xx_gmu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.gpu_state_get = a6xx_gpu_state_get,
		.gpu_state_put = a6xx_gpu_state_put,
#endif
		.create_address_space = adreno_iommu_create_address_space,
	},
	.get_timestamp = a6xx_get_timestamp,
};

struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct device_node *node;
	struct a6xx_gpu *a6xx_gpu;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
	if (!a6xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a6xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = NULL;
	adreno_gpu->reg_offsets = a6xx_register_offsets;

	if (adreno_is_a650(adreno_gpu))
		adreno_gpu->base.hw_apriv = true;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	/* Check if there is a GMU phandle and set it up */
	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

	/* FIXME: How do we gracefully handle this? */
	BUG_ON(!node);

	ret = a6xx_gmu_init(a6xx_gpu, node);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
				a6xx_fault_handler);

	return gpu;
}