// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/devfreq.h>

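/*
 * Peripheral Authentication Service (PAS) id identifying the GPU zap shader
 * image to the secure world (used by a6xx_zap_shader_init() below).
 */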
#define GPU_PAS_ID 13

static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        /* Check that the GMU is idle */
        if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
                return false;

        /* Check that the CX master is idle */
        if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
                        ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
                return false;

        return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
                A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        /* wait for CP to drain ringbuffer: */
        if (!adreno_idle(gpu, ring))
                return false;

        if (spin_until(_a6xx_check_idle(gpu))) {
                DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
                        gpu->name, __builtin_return_address(0),
                        gpu_read(gpu, REG_A6XX_RBBM_STATUS),
                        gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
                        gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
                        gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
                return false;
        }

        return true;
}

static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        uint32_t wptr;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);

        /* Copy the shadow to the actual register */
        ring->cur = ring->next;

        /* Make sure to wrap wptr if we need to */
        wptr = get_wptr(ring);

        spin_unlock_irqrestore(&ring->lock, flags);

        /* Make sure everything is posted before making a decision */
        mb();

        gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}

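/*
 * Emit a CP_REG_TO_MEM packet asking the CP to copy a counter register pair
 * into ring memory: (2 << 18) sets the dword count to two and (1 << 30)
 * requests a 64-bit transfer, per the CP_REG_TO_MEM packet layout.
 */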
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
                u64 iova)
{
        OUT_PKT7(ring, CP_REG_TO_MEM, 3);
        OUT_RING(ring, counter | (1 << 30) | (2 << 18));
        OUT_RING(ring, lower_32_bits(iova));
        OUT_RING(ring, upper_32_bits(iova));
}

static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
{
        unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;

        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_start));

        /*
         * For PM4 the GMU register offsets are calculated from the base of the
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
        get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
                rbmemptr_stats(ring, index, alwayson_start));

        /* Invalidate CCU depth and color */
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
        OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);

        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
        OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);

        /* Submit the commands */
        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
                        /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, submit->cmd[i].size);
                        break;
                }
        }

        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_end));
        get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
                rbmemptr_stats(ring, index, alwayson_end));

        /* Write the fence to the scratch register */
        OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->seqno);

        /*
         * Execute a CACHE_FLUSH_TS event. This will ensure that the
         * timestamp is written to memory and then trigger the interrupt.
         */
        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
        OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
        OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);

        trace_msm_gpu_submit_flush(submit,
                gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
                        REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));

        a6xx_flush(gpu, ring);
}

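/*
 * Hardware clock gating register/value pairs, written in bulk (or zeroed)
 * by a6xx_set_hwcg() below. These are the values used when clock gating is
 * enabled on a630.
 */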
static const struct {
        u32 offset;
        u32 value;
} a6xx_hwcg[] = {
        {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
        {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
        {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
        {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
        {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
        {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
        {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
        {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
        {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
        {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
        {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
        {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
        {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
        {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
        {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
        {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
        {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
        {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
        {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
        {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
        {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
        {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
        {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
        {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
        {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
        {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
        {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
        {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
        {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
        {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
        {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
        {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
        {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
        {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
        {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
        {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
        {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
        {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
        {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
        {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
        {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
        {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
        {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
        {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

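/*
 * Toggle hardware clock gating by writing the table above; 0x8aa8aa02 in
 * RBBM_CLOCK_CNTL is the fully-enabled state that is checked for below to
 * avoid redundant reprogramming.
 */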
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        unsigned int i;
        u32 val;

        val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

        /* Don't re-program the registers if they are already correct */
        if ((!state && !val) || (state && (val == 0x8aa8aa02)))
                return;

        /* Disable SP clock before programming HWCG registers */
        gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

        for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
                gpu_write(gpu, a6xx_hwcg[i].offset,
                        state ? a6xx_hwcg[i].value : 0);

        /* Enable SP clock */
        gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

        gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
}

static int a6xx_cp_init(struct msm_gpu *gpu)
{
        struct msm_ringbuffer *ring = gpu->rb[0];

        OUT_PKT7(ring, CP_ME_INIT, 8);

        OUT_RING(ring, 0x0000002f);

        /* Enable multiple hardware contexts */
        OUT_RING(ring, 0x00000003);

        /* Enable error detection */
        OUT_RING(ring, 0x20000000);

        /* Don't enable header dump */
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        /* No workarounds enabled */
        OUT_RING(ring, 0x00000000);

        /* Pad rest of the cmds with 0's */
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        a6xx_flush(gpu, ring);
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

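/*
 * Load the SQE (the a6xx CP microcode) firmware into a pinned GEM buffer
 * and point the CP at it. The BO is created once and reused across
 * subsequent hw_init calls.
 */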
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        if (!a6xx_gpu->sqe_bo) {
                a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

                if (IS_ERR(a6xx_gpu->sqe_bo)) {
                        int ret = PTR_ERR(a6xx_gpu->sqe_bo);

                        a6xx_gpu->sqe_bo = NULL;
                        DRM_DEV_ERROR(&gpu->pdev->dev,
                                "Could not allocate SQE ucode: %d\n", ret);

                        return ret;
                }

                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
        }

        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
                REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);

        return 0;
}

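/*
 * The zap shader only needs to be loaded into the secure world once per
 * boot, hence the static flag; on success the CP can be used to leave
 * secure mode (see a6xx_hw_init()).
 */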
static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
        static bool loaded;
        int ret;

        if (loaded)
                return 0;

        ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

        loaded = !ret;
        return ret;
}

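/* The RBBM interrupt sources that a6xx_irq() handles */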
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
          A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
          A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
          A6XX_RBBM_INT_0_MASK_CP_IB2 | \
          A6XX_RBBM_INT_0_MASK_CP_IB1 | \
          A6XX_RBBM_INT_0_MASK_CP_RB | \
          A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
          A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
          A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
          A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
          A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

static int a6xx_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        int ret;

        /* Make sure the GMU keeps the GPU on while we set it up */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

        /*
         * Disable the trusted memory range - we don't actually support secure
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
        gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

        /* Turn on 64 bit addressing for all blocks */
        gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

        /*
         * Enable hardware clock gating. For now, enable it only on a630.
         */
        if (adreno_is_a630(adreno_gpu))
                a6xx_set_hwcg(gpu, true);

        /* VBIF/GBIF start */
        gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
        if (adreno_is_a630(adreno_gpu))
                gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

        /* Make all blocks contribute to the GPU BUSY perf counter */
        gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

        /* Disable L2 bypass in the UCHE */
        gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
        gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
        gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
        gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
        gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
        gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

        /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
        gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
                REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);

        gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
                REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
                0x00100000 + adreno_gpu->gmem - 1);

        gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
        gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);

        gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
        gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);

        /* Set the memory pool size */
        gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

        /* Set the primFifo thresholds to their default values */
        gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

        /* Set the AHB default slave response to "ERROR" */
        gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

        /* Turn on performance counters */
        gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

        /* Select CP0 to always count cycles */
        gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

        if (adreno_is_a630(adreno_gpu)) {
                gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
                gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
                gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
                gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
        }

        /* Enable fault detection */
        gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
                (1 << 30) | 0x1fffff);

        gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);

        /* Protect registers from the CP */
        gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);

        gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
                A6XX_PROTECT_RDONLY(0x600, 0x51));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
                A6XX_PROTECT_RDONLY(0xfc00, 0x3));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
                A6XX_PROTECT_RDONLY(0x0, 0x4f9));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
                A6XX_PROTECT_RDONLY(0x501, 0xa));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
                A6XX_PROTECT_RDONLY(0x511, 0x44));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
                A6XX_PROTECT_RW(0xbe20, 0x11f3));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
                        A6XX_PROTECT_RDONLY(0x980, 0x4));
        gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));

        /* Enable interrupts */
        gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);

        ret = adreno_hw_init(gpu);
        if (ret)
                goto out;

        ret = a6xx_ucode_init(gpu);
        if (ret)
                goto out;

        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];

        /* Enable the SQE to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);

        ret = a6xx_cp_init(gpu);
        if (ret)
                goto out;

        /*
         * Try to load a zap shader into the secure world. If successful
         * we can use the CP to switch out of secure mode. If not then we
         * have no recourse but to try to switch ourselves out manually. If we
         * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
         * be blocked and a permissions violation will soon follow.
         */
        ret = a6xx_zap_shader_init(gpu);
        if (!ret) {
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);

                a6xx_flush(gpu, gpu->rb[0]);
                if (!a6xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else if (ret == -ENODEV) {
                /*
                 * This device does not use a zap shader (but print a warning
                 * just in case someone got their dt wrong.. hopefully they
                 * have a debug UART to realize the error of their ways...
                 * if you mess this up you are about to crash horribly)
                 */
                dev_warn_once(gpu->dev->dev,
                        "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
                gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
                ret = 0;
        } else {
                return ret;
        }

out:
        /*
         * Tell the GMU that we are done touching the GPU and it can start power
         * management
         */
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

        /* Take the GMU out of its special boot mode */
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);

        return ret;
}

static void a6xx_dump(struct msm_gpu *gpu)
{
        DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
                        gpu_read(gpu, REG_A6XX_RBBM_STATUS));
        adreno_dump(gpu);
}

#define VBIF_RESET_ACK_TIMEOUT  100
#define VBIF_RESET_ACK_MASK     0x00f0

static void a6xx_recover(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        int i;

        adreno_dump_info(gpu);

        for (i = 0; i < 8; i++)
                DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

        if (hang_debug)
                a6xx_dump(gpu);

        /*
         * Turn off keep alive that might have been enabled by the hang
         * interrupt
         */
        gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

        gpu->funcs->pm_suspend(gpu);
        gpu->funcs->pm_resume(gpu);

        msm_gpu_hw_init(gpu);
}

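/*
 * IOMMU pagefault handler: log the faulting iova along with CP scratch
 * registers 4-7, which userspace may write to help correlate the fault
 * with the command stream.
 */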
static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
{
        struct msm_gpu *gpu = arg;

        pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
                        iova, flags,
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));

        return -EFAULT;
}

static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);

        if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
                u32 val;

                gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
                val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
                dev_err_ratelimited(&gpu->pdev->dev,
                        "CP | opcode error | possible opcode=0x%8.8X\n",
                        val);
        }

        if (status & A6XX_CP_INT_CP_UCODE_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev,
                        "CP ucode error interrupt\n");

        if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
                        gpu_read(gpu, REG_A6XX_CP_HW_FAULT));

        if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
                u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);

                dev_err_ratelimited(&gpu->pdev->dev,
                        "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
                        val & (1 << 20) ? "READ" : "WRITE",
                        (val & 0x3ffff), val);
        }

        if (status & A6XX_CP_INT_CP_AHB_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");

        if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");

        if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}

static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

        /*
         * Force the GPU to stay on until after we finish
         * collecting information
         */
        gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);

        DRM_DEV_ERROR(&gpu->pdev->dev,
                "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
                ring ? ring->id : -1, ring ? ring->seqno : 0,
                gpu_read(gpu, REG_A6XX_RBBM_STATUS),
                gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
                gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
                gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
                gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
                gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));

        /* Turn off the hangcheck timer to keep it from bothering us */
        del_timer(&gpu->hangcheck_timer);

        queue_work(priv->wq, &gpu->recover_work);
}

static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);

        gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);

        if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
                a6xx_fault_detect_irq(gpu);

        if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
                dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");

        if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
                a6xx_cp_hw_err_irq(gpu);

        if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
                dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");

        if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
                dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");

        if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
                dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");

        if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
                msm_gpu_retire(gpu);

        return IRQ_HANDLED;
}

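/* Map the generic adreno register IDs used by common code to a6xx offsets */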
static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
                REG_A6XX_CP_RB_RPTR_ADDR_LO),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
                REG_A6XX_CP_RB_RPTR_ADDR_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};

static int a6xx_pm_resume(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        int ret;

        gpu->needs_hw_init = true;

        ret = a6xx_gmu_resume(a6xx_gpu);
        if (ret)
                return ret;

        msm_gpu_resume_devfreq(gpu);

        return 0;
}

static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        devfreq_suspend_device(gpu->devfreq.devfreq);

        return a6xx_gmu_stop(a6xx_gpu);
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

        *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                REG_A6XX_RBBM_PERFCTR_CP_0_HI);

        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
        return 0;
}

static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        return a6xx_gpu->cur_ring;
}

static void a6xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        if (a6xx_gpu->sqe_bo) {
                msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
        }

        a6xx_gmu_remove(a6xx_gpu);

        adreno_gpu_cleanup(adreno_gpu);
        kfree(a6xx_gpu);
}

static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        u64 busy_cycles, busy_time;

        busy_cycles = gmu_read64(&a6xx_gpu->gmu,
                        REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
                        REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);

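        /*
         * The always-on counter ticks at 19.2 MHz, so multiplying the cycle
         * delta by 10 and dividing by 192 yields busy time in microseconds.
         */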
        busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
        do_div(busy_time, 192);

        gpu->devfreq.busy_cycles = busy_cycles;

        if (WARN_ON(busy_time > ~0LU))
                return ~0LU;

        return (unsigned long)busy_time;
}

static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .hw_init = a6xx_hw_init,
                .pm_suspend = a6xx_pm_suspend,
                .pm_resume = a6xx_pm_resume,
                .recover = a6xx_recover,
                .submit = a6xx_submit,
                .flush = a6xx_flush,
                .active_ring = a6xx_active_ring,
                .irq = a6xx_irq,
                .destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
                .show = a6xx_show,
#endif
                .gpu_busy = a6xx_gpu_busy,
                .gpu_get_freq = a6xx_gmu_get_freq,
                .gpu_set_freq = a6xx_gmu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
#endif
        },
        .get_timestamp = a6xx_get_timestamp,
};

struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
        struct device_node *node;
        struct a6xx_gpu *a6xx_gpu;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
        int ret;

        a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
        if (!a6xx_gpu)
                return ERR_PTR(-ENOMEM);

        adreno_gpu = &a6xx_gpu->base;
        gpu = &adreno_gpu->base;

        adreno_gpu->registers = NULL;
        adreno_gpu->reg_offsets = a6xx_register_offsets;

        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret) {
                a6xx_destroy(&(a6xx_gpu->base.base));
                return ERR_PTR(ret);
        }

        /* Check if there is a GMU phandle and set it up */
        node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

        /* FIXME: How do we gracefully handle this? */
        BUG_ON(!node);

        ret = a6xx_gmu_init(a6xx_gpu, node);
        if (ret) {
                a6xx_destroy(&(a6xx_gpu->base.base));
                return ERR_PTR(ret);
        }

        if (gpu->aspace)
                msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
                                a6xx_fault_handler);

        return gpu;
}