1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
7 #include "msm_gpu_trace.h"
9 #include "a6xx_gmu.xml.h"
11 #include <linux/bitfield.h>
12 #include <linux/devfreq.h>
13 #include <linux/nvmem-consumer.h>
14 #include <linux/soc/qcom/llcc-qcom.h>
18 static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
20 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
21 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
23 /* Check that the GMU is idle */
24 if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
27 /* Check that the CX master is idle */
28 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
29 ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
32 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
33 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
36 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
38 /* wait for CP to drain ringbuffer: */
39 if (!adreno_idle(gpu, ring))
42 if (spin_until(_a6xx_check_idle(gpu))) {
43 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
44 gpu->name, __builtin_return_address(0),
45 gpu_read(gpu, REG_A6XX_RBBM_STATUS),
46 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
47 gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
48 gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
55 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
57 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
58 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
62 /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
63 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
66 OUT_PKT7(ring, CP_WHERE_AM_I, 2);
67 OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
68 OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
71 spin_lock_irqsave(&ring->preempt_lock, flags);
73 /* Copy the shadow to the actual register */
74 ring->cur = ring->next;
76 /* Make sure to wrap wptr if we need to */
77 wptr = get_wptr(ring);
79 spin_unlock_irqrestore(&ring->preempt_lock, flags);
81 /* Make sure everything is posted before making a decision */
84 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
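/*
 * Have the CP copy a 64-bit counter register pair into the given IOVA
 * (used for the per-submit CP cycle and always-on counter snapshots).
 */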
87 static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
90 OUT_PKT7(ring, CP_REG_TO_MEM, 3);
91 OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
92 CP_REG_TO_MEM_0_CNT(2) |
94 OUT_RING(ring, lower_32_bits(iova));
95 OUT_RING(ring, upper_32_bits(iova));
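/*
 * Switch the GPU to the incoming context's pagetable. The update is emitted
 * from the ring so the CP applies it at the right point in the command
 * stream; it is skipped when that context is already current.
 */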
98 static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
99 struct msm_ringbuffer *ring, struct msm_file_private *ctx)
103 u64 memptr = rbmemptr(ring, ttbr0);
105 if (ctx == a6xx_gpu->cur_ctx)
108 if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
111 /* Execute the table update */
112 OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
113 OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
116 CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
117 CP_SMMU_TABLE_UPDATE_1_ASID(asid));
118 OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
119 OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
122 * Write the new TTBR0 to the memstore. This is good for debugging.
124 OUT_PKT7(ring, CP_MEM_WRITE, 4);
125 OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
126 OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
127 OUT_RING(ring, lower_32_bits(ttbr));
128 OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
131 * And finally, trigger a uche flush to be sure there isn't anything
132 * lingering in that part of the GPU
135 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
136 OUT_RING(ring, 0x31);
138 a6xx_gpu->cur_ctx = ctx;
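/* Build the ringbuffer commands for a submit and hand them off to the CP */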
141 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
143 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
144 struct msm_drm_private *priv = gpu->dev->dev_private;
145 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
146 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
147 struct msm_ringbuffer *ring = submit->ring;
150 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
152 get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
153 rbmemptr_stats(ring, index, cpcycles_start));
156 * For PM4 the GMU register offsets are calculated from the base of the
157 * GPU registers so we need to add 0x1a800 to the register value on A630
158 * to get the right value from PM4.
160 get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
161 rbmemptr_stats(ring, index, alwayson_start));
163 /* Invalidate CCU depth and color */
164 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
165 OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
167 OUT_PKT7(ring, CP_EVENT_WRITE, 1);
168 OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
170 /* Submit the commands */
171 for (i = 0; i < submit->nr_cmds; i++) {
172 switch (submit->cmd[i].type) {
173 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
175 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
176 if (priv->lastctx == submit->queue->ctx)
179 case MSM_SUBMIT_CMD_BUF:
180 OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
181 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
182 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
183 OUT_RING(ring, submit->cmd[i].size);
188 get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
189 rbmemptr_stats(ring, index, cpcycles_end));
190 get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
191 rbmemptr_stats(ring, index, alwayson_end));
193 /* Write the fence to the scratch register */
194 OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
195 OUT_RING(ring, submit->seqno);
198 * Execute a CACHE_FLUSH_TS event. This will ensure that the
199 * timestamp is written to memory and then trigger the interrupt
201 OUT_PKT7(ring, CP_EVENT_WRITE, 4);
202 OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
203 CP_EVENT_WRITE_0_IRQ);
204 OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
205 OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
206 OUT_RING(ring, submit->seqno);
208 trace_msm_gpu_submit_flush(submit,
209 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
210 REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
212 a6xx_flush(gpu, ring);
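/*
 * Hardware clock gating (HWCG) register/value tables, applied by
 * a6xx_set_hwcg() when clock gating is enabled.
 */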
215 const struct adreno_reglist a630_hwcg[] = {
216 {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
217 {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
218 {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
219 {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
220 {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
221 {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
222 {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
223 {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
224 {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
225 {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
226 {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
227 {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
228 {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
229 {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
230 {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
231 {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
232 {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
233 {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
234 {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
235 {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
236 {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
237 {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
238 {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
239 {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
240 {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
241 {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
242 {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
243 {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
244 {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
245 {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
246 {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
247 {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
248 {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
249 {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
250 {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
251 {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
252 {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
253 {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
254 {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
255 {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
256 {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
257 {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
258 {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
259 {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
260 {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
261 {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
262 {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
263 {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
264 {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
265 {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
266 {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
267 {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
268 {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
269 {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
270 {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
271 {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
272 {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
273 {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
274 {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
275 {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
276 {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
277 {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
278 {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
279 {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
280 {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
281 {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
282 {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
283 {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
284 {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
285 {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
286 {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
287 {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
288 {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
289 {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
290 {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
291 {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
292 {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
293 {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
294 {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
295 {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
296 {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
297 {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
298 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
299 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
300 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
301 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
302 {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
303 {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
304 {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
305 {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
306 {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
307 {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
308 {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
309 {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
310 {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
311 {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
312 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
313 {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
314 {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
315 {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
316 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
317 {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
318 {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
319 {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
320 {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
324 const struct adreno_reglist a640_hwcg[] = {
325 {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
326 {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
327 {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
328 {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
329 {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
330 {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
331 {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
332 {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
333 {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
334 {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
335 {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
336 {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
337 {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
338 {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
339 {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
340 {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
341 {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
342 {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
343 {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
344 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
345 {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
346 {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
347 {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
348 {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
349 {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
350 {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
351 {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
352 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
353 {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
354 {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
355 {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
356 {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
357 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
358 {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
359 {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
360 {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
361 {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
362 {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
363 {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
364 {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
365 {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
366 {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
367 {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
368 {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
369 {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
370 {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
371 {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
372 {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
373 {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
377 const struct adreno_reglist a650_hwcg[] = {
378 {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
379 {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
380 {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
381 {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
382 {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
383 {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
384 {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
385 {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
386 {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
387 {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
388 {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
389 {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
390 {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
391 {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
392 {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
393 {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
394 {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
395 {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
396 {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
397 {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
398 {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
399 {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
400 {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
401 {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
402 {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
403 {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
404 {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
405 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
406 {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
407 {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
408 {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
409 {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
410 {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
411 {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
412 {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
413 {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
414 {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
415 {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
416 {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
417 {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
418 {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
419 {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
420 {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
421 {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
422 {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
423 {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
424 {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
425 {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
426 {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
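/*
 * Enable or disable hardware clock gating: program the per-target reglist
 * (or zeroes) with the SP clock briefly gated off through the GMU.
 */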
430 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
432 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
433 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
434 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
435 const struct adreno_reglist *reg;
437 u32 val, clock_cntl_on;
439 if (!adreno_gpu->info->hwcg)
442 if (adreno_is_a630(adreno_gpu))
443 clock_cntl_on = 0x8aa8aa02;
445 clock_cntl_on = 0x8aa8aa82;
447 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
449 /* Don't re-program the registers if they are already correct */
450 if ((!state && !val) || (state && (val == clock_cntl_on)))
453 /* Disable SP clock before programming HWCG registers */
454 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
456 for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
457 gpu_write(gpu, reg->offset, state ? reg->value : 0);
459 /* Enable SP clock */
460 gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
462 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
465 /* For a615, a616, a618, a619, a630, a640 and a680 */
466 static const u32 a6xx_protect[] = {
467 A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
468 A6XX_PROTECT_RDONLY(0x00501, 0x0005),
469 A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
470 A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
471 A6XX_PROTECT_NORDWR(0x00510, 0x0000),
472 A6XX_PROTECT_NORDWR(0x00534, 0x0000),
473 A6XX_PROTECT_NORDWR(0x00800, 0x0082),
474 A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
475 A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
476 A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
477 A6XX_PROTECT_NORDWR(0x00900, 0x004d),
478 A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
479 A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
480 A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
481 A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
482 A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
483 A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
484 A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
485 A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
486 A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
487 A6XX_PROTECT_NORDWR(0x09624, 0x01db),
488 A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
489 A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
490 A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
491 A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
492 A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
493 A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
494 A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
495 A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
496 A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
497 A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
498 A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
501 /* These are for a620 and a650 */
502 static const u32 a650_protect[] = {
503 A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
504 A6XX_PROTECT_RDONLY(0x00501, 0x0005),
505 A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
506 A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
507 A6XX_PROTECT_NORDWR(0x00510, 0x0000),
508 A6XX_PROTECT_NORDWR(0x00534, 0x0000),
509 A6XX_PROTECT_NORDWR(0x00800, 0x0082),
510 A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
511 A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
512 A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
513 A6XX_PROTECT_NORDWR(0x00900, 0x004d),
514 A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
515 A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
516 A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
517 A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
518 A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
519 A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
520 A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
521 A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
522 A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
523 A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
524 A6XX_PROTECT_NORDWR(0x09624, 0x01db),
525 A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
526 A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
527 A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
528 A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
529 A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
530 A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
531 A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
532 A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
533 A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
534 A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
535 A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
536 A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
537 A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
538 A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
539 A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
540 A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
541 A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
544 static void a6xx_set_cp_protect(struct msm_gpu *gpu)
546 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
547 const u32 *regs = a6xx_protect;
548 unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
550 BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
551 BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
553 if (adreno_is_a650(adreno_gpu)) {
555 count = ARRAY_SIZE(a650_protect);
560 * Enable access protection to privileged registers, fault on an access
561 * protect violation and select the last span to protect from the start
562 * address all the way to the end of the register address space
564 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
566 for (i = 0; i < count - 1; i++)
567 gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
568 /* Write the final entry to the last CP_PROTECT register so it gets "infinite" length */
569 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
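/* Program the per-target UBWC (bandwidth compression) mode controls */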
572 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
574 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
577 u32 rgb565_predicator = 0;
578 u32 uavflagprd_inv = 0;
580 /* a618 is using the hw default values */
581 if (adreno_is_a618(adreno_gpu))
584 if (adreno_is_a640(adreno_gpu))
587 if (adreno_is_a650(adreno_gpu)) {
588 /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
591 rgb565_predicator = 1;
595 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
596 rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
597 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
598 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
599 uavflagprd_inv << 4 | lower_bit << 1);
600 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
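/* Send the CP_ME_INIT sequence to bring up the CP and wait for it to go idle */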
603 static int a6xx_cp_init(struct msm_gpu *gpu)
605 struct msm_ringbuffer *ring = gpu->rb[0];
607 OUT_PKT7(ring, CP_ME_INIT, 8);
609 OUT_RING(ring, 0x0000002f);
611 /* Enable multiple hardware contexts */
612 OUT_RING(ring, 0x00000003);
614 /* Enable error detection */
615 OUT_RING(ring, 0x20000000);
617 /* Don't enable header dump */
618 OUT_RING(ring, 0x00000000);
619 OUT_RING(ring, 0x00000000);
621 /* No workarounds enabled */
622 OUT_RING(ring, 0x00000000);
624 /* Pad rest of the cmds with 0's */
625 OUT_RING(ring, 0x00000000);
626 OUT_RING(ring, 0x00000000);
628 a6xx_flush(gpu, ring);
629 return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
633 * Check that the microcode version is new enough to include several key
634 * security fixes. Return true if the ucode is safe.
636 static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
637 struct drm_gem_object *obj)
639 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
640 struct msm_gpu *gpu = &adreno_gpu->base;
641 u32 *buf = msm_gem_get_vaddr(obj);
648 * Targets up to a640 (a618, a630 and a640) need to check for a
649 * microcode version that is patched to support the whereami opcode or
650 * one that is new enough to include it by default.
652 if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
653 adreno_is_a640(adreno_gpu)) {
655 * If the lowest nibble is 0xa that is an indication that this
656 * microcode has been patched. The actual version is in dword
657 * [3] but we only care about the patchlevel which is the lowest
658 * nibble of dword [3]
660 * Otherwise check that the firmware is greater than or equal
661 * to 1.90, which was the first version that had this fix built in.
664 if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
665 (buf[0] & 0xfff) >= 0x190) {
666 a6xx_gpu->has_whereami = true;
671 DRM_DEV_ERROR(&gpu->pdev->dev,
672 "a630 SQE ucode is too old. Have version %x need at least %x\n",
673 buf[0] & 0xfff, 0x190);
676 * a650 tier targets don't need whereami but still need to be
677 * equal to or newer than 0.95 for other security fixes
679 if (adreno_is_a650(adreno_gpu)) {
680 if ((buf[0] & 0xfff) >= 0x095) {
685 DRM_DEV_ERROR(&gpu->pdev->dev,
686 "a650 SQE ucode is too old. Have version %x need at least %x\n",
687 buf[0] & 0xfff, 0x095);
691 * When a660 is added those targets should return true here
692 * since those have all the critical security fixes built in
697 msm_gem_put_vaddr(obj);
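/* Create the SQE microcode BO on first use and program its GPU address */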
701 static int a6xx_ucode_init(struct msm_gpu *gpu)
703 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
704 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
706 if (!a6xx_gpu->sqe_bo) {
707 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
708 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
710 if (IS_ERR(a6xx_gpu->sqe_bo)) {
711 int ret = PTR_ERR(a6xx_gpu->sqe_bo);
713 a6xx_gpu->sqe_bo = NULL;
714 DRM_DEV_ERROR(&gpu->pdev->dev,
715 "Could not allocate SQE ucode: %d\n", ret);
720 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
721 if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
722 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
723 drm_gem_object_put(a6xx_gpu->sqe_bo);
725 a6xx_gpu->sqe_bo = NULL;
730 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
731 REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
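/*
 * Load the zap shader into the secure world so the CP can later switch the
 * GPU out of secure mode.
 */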
736 static int a6xx_zap_shader_init(struct msm_gpu *gpu)
744 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
750 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
751 A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
752 A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
753 A6XX_RBBM_INT_0_MASK_CP_IB2 | \
754 A6XX_RBBM_INT_0_MASK_CP_IB1 | \
755 A6XX_RBBM_INT_0_MASK_CP_RB | \
756 A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
757 A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
758 A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
759 A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
760 A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
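/*
 * Hardware setup performed on every power up: program static registers, load
 * the SQE microcode, start the CP and switch the GPU out of secure mode.
 */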
762 static int a6xx_hw_init(struct msm_gpu *gpu)
764 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
765 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
768 /* Make sure the GMU keeps the GPU on while we set it up */
769 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
771 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
774 * Disable the trusted memory range - we don't actually support secure
775 * memory rendering at this point in time and we don't want to block off
776 * part of the virtual memory space.
778 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
779 REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
780 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
782 /* Turn on 64 bit addressing for all blocks */
783 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
784 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
785 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
786 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
787 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
788 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
789 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
790 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
791 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
792 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
793 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
794 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
796 /* enable hardware clockgating */
797 a6xx_set_hwcg(gpu, true);
800 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
801 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
802 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
803 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
804 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
806 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
808 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
811 if (adreno_is_a630(adreno_gpu))
812 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
814 /* Make all blocks contribute to the GPU BUSY perf counter */
815 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
817 /* Disable L2 bypass in the UCHE */
818 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
819 gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
820 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
821 gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
822 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
823 gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
825 if (!adreno_is_a650(adreno_gpu)) {
826 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
827 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
828 REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
830 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
831 REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
832 0x00100000 + adreno_gpu->gmem - 1);
835 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
836 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
838 if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
839 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
841 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
842 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
844 /* Setting the mem pool size */
845 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
847 /* Set the primFifo thresholds to their default values,
848 * and the vccCacheSkipDis=1 bit (0x200) for A640 and newer
850 if (adreno_is_a650(adreno_gpu))
851 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
852 else if (adreno_is_a640(adreno_gpu))
853 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
855 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
857 /* Set the AHB default slave response to "ERROR" */
858 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
860 /* Turn on performance counters */
861 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
863 /* Select CP0 to always count cycles */
864 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
866 a6xx_set_ubwc_config(gpu);
868 /* Enable fault detection */
869 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
870 (1 << 30) | 0x1fffff);
872 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
874 /* Set weights for bicubic filtering */
875 if (adreno_is_a650(adreno_gpu)) {
876 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
877 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
879 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
881 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
883 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
887 /* Protect registers from the CP */
888 a6xx_set_cp_protect(gpu);
890 /* Enable expanded apriv for targets that support it */
892 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
893 (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
896 /* Enable interrupts */
897 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
899 ret = adreno_hw_init(gpu);
903 ret = a6xx_ucode_init(gpu);
907 /* Set the ringbuffer address */
908 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
911 /* Targets that support extended APRIV can use the RPTR shadow from
912 * hardware but all the other ones need to disable the feature. Targets
913 * that support the WHERE_AM_I opcode can use that instead
915 if (adreno_gpu->base.hw_apriv)
916 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
918 gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
919 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
922 * Expanded APRIV and targets that support WHERE_AM_I both need a
923 * privileged buffer to store the RPTR shadow
926 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
927 if (!a6xx_gpu->shadow_bo) {
928 a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
929 sizeof(u32) * gpu->nr_rings,
930 MSM_BO_WC | MSM_BO_MAP_PRIV,
931 gpu->aspace, &a6xx_gpu->shadow_bo,
932 &a6xx_gpu->shadow_iova);
934 if (IS_ERR(a6xx_gpu->shadow))
935 return PTR_ERR(a6xx_gpu->shadow);
938 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
939 REG_A6XX_CP_RB_RPTR_ADDR_HI,
940 shadowptr(a6xx_gpu, gpu->rb[0]));
943 /* Always come up on rb 0 */
944 a6xx_gpu->cur_ring = gpu->rb[0];
946 a6xx_gpu->cur_ctx = NULL;
948 /* Enable the SQE to start the CP engine */
949 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
951 ret = a6xx_cp_init(gpu);
956 * Try to load a zap shader into the secure world. If successful
957 * we can use the CP to switch out of secure mode. If not then we
958 * have no recourse but to try to switch ourselves out manually. If we
959 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
960 * be blocked and a permissions violation will soon follow.
962 ret = a6xx_zap_shader_init(gpu);
964 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
965 OUT_RING(gpu->rb[0], 0x00000000);
967 a6xx_flush(gpu, gpu->rb[0]);
968 if (!a6xx_idle(gpu, gpu->rb[0]))
970 } else if (ret == -ENODEV) {
972 * This device does not use zap shader (but print a warning
973 * just in case someone got their dt wrong.. hopefully they
974 * have a debug UART to realize the error of their ways...
975 * if you mess this up you are about to crash horribly)
977 dev_warn_once(gpu->dev->dev,
978 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
979 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
987 * Tell the GMU that we are done touching the GPU and it can start powering down
990 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
992 if (a6xx_gpu->gmu.legacy) {
993 /* Take the GMU out of its special boot mode */
994 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
1000 static void a6xx_dump(struct msm_gpu *gpu)
1002 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
1003 gpu_read(gpu, REG_A6XX_RBBM_STATUS));
1007 #define VBIF_RESET_ACK_TIMEOUT 100
1008 #define VBIF_RESET_ACK_MASK 0x00f0
1010 static void a6xx_recover(struct msm_gpu *gpu)
1012 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1013 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1016 adreno_dump_info(gpu);
1018 for (i = 0; i < 8; i++)
1019 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
1020 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
1026 * Turn off the keep-alive that might have been enabled by the hang detection
1029 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
1031 gpu->funcs->pm_suspend(gpu);
1032 gpu->funcs->pm_resume(gpu);
1034 msm_gpu_hw_init(gpu);
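/*
 * IOMMU pagefault handler: log the faulting IOVA along with the CP scratch
 * registers (4-7) for debugging.
 */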
1037 static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
1039 struct msm_gpu *gpu = arg;
1041 pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
1043 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
1044 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
1045 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
1046 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
1051 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
1053 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
1055 if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
1058 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
1059 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
1060 dev_err_ratelimited(&gpu->pdev->dev,
1061 "CP | opcode error | possible opcode=0x%8.8X\n",
1065 if (status & A6XX_CP_INT_CP_UCODE_ERROR)
1066 dev_err_ratelimited(&gpu->pdev->dev,
1067 "CP ucode error interrupt\n");
1069 if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
1070 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
1071 gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
1073 if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
1074 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
1076 dev_err_ratelimited(&gpu->pdev->dev,
1077 "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
1078 val & (1 << 20) ? "READ" : "WRITE",
1079 (val & 0x3ffff), val);
1082 if (status & A6XX_CP_INT_CP_AHB_ERROR)
1083 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
1085 if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
1086 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
1088 if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
1089 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
1093 static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
1095 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1096 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1097 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
1100 * Force the GPU to stay on until after we finish
1101 * collecting information
1103 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
1105 DRM_DEV_ERROR(&gpu->pdev->dev,
1106 "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
1107 ring ? ring->id : -1, ring ? ring->seqno : 0,
1108 gpu_read(gpu, REG_A6XX_RBBM_STATUS),
1109 gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
1110 gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
1111 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
1112 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
1113 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
1114 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
1116 /* Turn off the hangcheck timer to keep it from bothering us */
1117 del_timer(&gpu->hangcheck_timer);
1119 kthread_queue_work(gpu->worker, &gpu->recover_work);
1122 static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
1124 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
1126 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
1128 if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
1129 a6xx_fault_detect_irq(gpu);
1131 if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
1132 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
1134 if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
1135 a6xx_cp_hw_err_irq(gpu);
1137 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
1138 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
1140 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
1141 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
1143 if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
1144 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
1146 if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
1147 msm_gpu_retire(gpu);
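/* Accessors for the system cache (LLC) control registers mapped at llc_mmio */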
1152 static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
1154 return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
1157 static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
1159 return msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
1162 static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
1164 llcc_slice_deactivate(a6xx_gpu->llc_slice);
1165 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
1168 static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
1170 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1171 struct msm_gpu *gpu = &adreno_gpu->base;
1172 u32 cntl1_regval = 0;
1174 if (IS_ERR(a6xx_gpu->llc_mmio))
1177 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
1178 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
1181 cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
1182 (gpu_scid << 15) | (gpu_scid << 20);
1186 * For targets with a MMU500, activate the slice but don't program the
1187 * register. The XBL will take care of that.
1189 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
1190 if (!a6xx_gpu->have_mmu500) {
1191 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
1193 gpuhtw_scid &= 0x1f;
1194 cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
1200 * Program the slice IDs for the various GPU blocks and GPU MMU pagetables
1203 if (a6xx_gpu->have_mmu500)
1204 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
1207 a6xx_llc_write(a6xx_gpu,
1208 REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
1211 * Program cacheability overrides to not allocate cache
1212 * lines on a write miss
1214 a6xx_llc_rmw(a6xx_gpu,
1215 REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
1220 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
1222 llcc_slice_putd(a6xx_gpu->llc_slice);
1223 llcc_slice_putd(a6xx_gpu->htw_llc_slice);
1226 static void a6xx_llc_slices_init(struct platform_device *pdev,
1227 struct a6xx_gpu *a6xx_gpu)
1229 struct device_node *phandle;
1232 * There is a different programming path for targets with an mmu500
1233 * attached, so detect if that is the case
1235 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
1236 a6xx_gpu->have_mmu500 = (phandle &&
1237 of_device_is_compatible(phandle, "arm,mmu-500"));
1238 of_node_put(phandle);
1240 if (a6xx_gpu->have_mmu500)
1241 a6xx_gpu->llc_mmio = NULL;
1243 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
1245 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
1246 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
1248 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
1249 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
1252 static int a6xx_pm_resume(struct msm_gpu *gpu)
1254 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1255 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1258 gpu->needs_hw_init = true;
1260 trace_msm_gpu_resume(0);
1262 ret = a6xx_gmu_resume(a6xx_gpu);
1266 msm_gpu_resume_devfreq(gpu);
1268 a6xx_llc_activate(a6xx_gpu);
1273 static int a6xx_pm_suspend(struct msm_gpu *gpu)
1275 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1276 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1279 trace_msm_gpu_suspend(0);
1281 a6xx_llc_deactivate(a6xx_gpu);
1283 devfreq_suspend_device(gpu->devfreq.devfreq);
1285 ret = a6xx_gmu_stop(a6xx_gpu);
1289 if (a6xx_gpu->shadow_bo)
1290 for (i = 0; i < gpu->nr_rings; i++)
1291 a6xx_gpu->shadow[i] = 0;
1296 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1298 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1299 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1300 static DEFINE_MUTEX(perfcounter_oob);
1302 mutex_lock(&perfcounter_oob);
1304 /* Force the GPU power on so we can read this register */
1305 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
1307 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
1308 REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
1310 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
1311 mutex_unlock(&perfcounter_oob);
1315 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
1317 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1318 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1320 return a6xx_gpu->cur_ring;
1323 static void a6xx_destroy(struct msm_gpu *gpu)
1325 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1326 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1328 if (a6xx_gpu->sqe_bo) {
1329 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
1330 drm_gem_object_put(a6xx_gpu->sqe_bo);
1333 if (a6xx_gpu->shadow_bo) {
1334 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
1335 drm_gem_object_put(a6xx_gpu->shadow_bo);
1338 a6xx_llc_slices_destroy(a6xx_gpu);
1340 a6xx_gmu_remove(a6xx_gpu);
1342 adreno_gpu_cleanup(adreno_gpu);
1347 static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
1349 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1350 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1351 u64 busy_cycles, busy_time;
1354 /* Only read the gpu busy if the hardware is already active */
1355 if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
1358 busy_cycles = gmu_read64(&a6xx_gpu->gmu,
1359 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
1360 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
1362 busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
1363 do_div(busy_time, 192);
1365 gpu->devfreq.busy_cycles = busy_cycles;
1367 pm_runtime_put(a6xx_gpu->gmu.dev);
1369 if (WARN_ON(busy_time > ~0LU))
1372 return (unsigned long)busy_time;
1375 static struct msm_gem_address_space *
1376 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
1378 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1379 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1380 struct iommu_domain *iommu;
1381 struct msm_mmu *mmu;
1382 struct msm_gem_address_space *aspace;
1385 iommu = iommu_domain_alloc(&platform_bus_type);
1390 * This allows the GPU to set the bus attributes required to use system
1391 * cache on behalf of the iommu page table walker.
1393 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
1394 adreno_set_llc_attributes(iommu);
1396 mmu = msm_iommu_new(&pdev->dev, iommu);
1398 iommu_domain_free(iommu);
1399 return ERR_CAST(mmu);
1403 * Use the aperture start or SZ_16M, whichever is greater. This will
1404 * ensure that we align with the allocated pagetable range while still
1405 * allowing room in the lower 32 bits for GMEM and whatnot
1407 start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
1408 size = iommu->geometry.aperture_end - start + 1;
1410 aspace = msm_gem_address_space_create(mmu, "gpu",
1411 start & GENMASK_ULL(48, 0), size);
1413 if (IS_ERR(aspace) && !IS_ERR(mmu))
1414 mmu->funcs->destroy(mmu);
1419 static struct msm_gem_address_space *
1420 a6xx_create_private_address_space(struct msm_gpu *gpu)
1422 struct msm_mmu *mmu;
1424 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
1427 return ERR_CAST(mmu);
1429 return msm_gem_address_space_create(mmu,
1430 "gpu", 0x100000000ULL, 0x1ffffffffULL);
1433 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
1435 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1436 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1438 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
1439 return a6xx_gpu->shadow[ring->id];
1441 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
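/* Map the raw a618 speed-bin fuse value to a bin index for the supported-hw mask */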
1444 static u32 a618_get_speed_bin(u32 fuse)
1448 else if (fuse == 169)
1450 else if (fuse == 174)
1456 static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
1461 val = a618_get_speed_bin(fuse);
1463 if (val == UINT_MAX) {
1465 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
1473 static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
1476 u32 supp_hw = UINT_MAX;
1480 ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
1482 * -ENOENT means that the platform doesn't support speedbin, which is fine
1485 if (ret == -ENOENT) {
1489 "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
1493 speedbin = le16_to_cpu(speedbin);
1495 supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
1498 ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
1505 static const struct adreno_gpu_funcs funcs = {
1507 .get_param = adreno_get_param,
1508 .hw_init = a6xx_hw_init,
1509 .pm_suspend = a6xx_pm_suspend,
1510 .pm_resume = a6xx_pm_resume,
1511 .recover = a6xx_recover,
1512 .submit = a6xx_submit,
1513 .active_ring = a6xx_active_ring,
1515 .destroy = a6xx_destroy,
1516 #if defined(CONFIG_DRM_MSM_GPU_STATE)
1519 .gpu_busy = a6xx_gpu_busy,
1520 .gpu_get_freq = a6xx_gmu_get_freq,
1521 .gpu_set_freq = a6xx_gmu_set_freq,
1522 #if defined(CONFIG_DRM_MSM_GPU_STATE)
1523 .gpu_state_get = a6xx_gpu_state_get,
1524 .gpu_state_put = a6xx_gpu_state_put,
1526 .create_address_space = a6xx_create_address_space,
1527 .create_private_address_space = a6xx_create_private_address_space,
1528 .get_rptr = a6xx_get_rptr,
1530 .get_timestamp = a6xx_get_timestamp,
1533 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
1535 struct msm_drm_private *priv = dev->dev_private;
1536 struct platform_device *pdev = priv->gpu_pdev;
1537 struct adreno_platform_config *config = pdev->dev.platform_data;
1538 const struct adreno_info *info;
1539 struct device_node *node;
1540 struct a6xx_gpu *a6xx_gpu;
1541 struct adreno_gpu *adreno_gpu;
1542 struct msm_gpu *gpu;
1545 a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
1547 return ERR_PTR(-ENOMEM);
1549 adreno_gpu = &a6xx_gpu->base;
1550 gpu = &adreno_gpu->base;
1552 adreno_gpu->registers = NULL;
1555 * We need to know the platform type before calling into adreno_gpu_init
1556 * so that the hw_apriv flag can be correctly set. Snoop into the info
1557 * and grab the revision number
1559 info = adreno_info(config->rev);
1561 if (info && info->revn == 650)
1562 adreno_gpu->base.hw_apriv = true;
1564 a6xx_llc_slices_init(pdev, a6xx_gpu);
1566 ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
1568 a6xx_destroy(&(a6xx_gpu->base.base));
1569 return ERR_PTR(ret);
1572 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
1574 a6xx_destroy(&(a6xx_gpu->base.base));
1575 return ERR_PTR(ret);
1578 /* Check if there is a GMU phandle and set it up */
1579 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
1581 /* FIXME: How do we gracefully handle this? */
1584 ret = a6xx_gmu_init(a6xx_gpu, node);
1586 a6xx_destroy(&(a6xx_gpu->base.base));
1587 return ERR_PTR(ret);
1591 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
1592 a6xx_fault_handler);