// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;

        /* FIXME: add a banner here */
        gmu->hung = true;

        /* Turn off the hangcheck timer while we are resetting */
        del_timer(&gpu->hangcheck_timer);

        /* Queue the GPU handler because we need to treat this as a recovery */
        kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
        struct a6xx_gmu *gmu = data;
        u32 status;

        status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
                dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

                a6xx_gmu_fault(gmu);
        }

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
                dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
                dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

        return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
        struct a6xx_gmu *gmu = data;
        u32 status;

        status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

        if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
                dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

                a6xx_gmu_fault(gmu);
        }

        return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
        u32 val;

        /* This can be called from gpu state code so make sure GMU is valid */
        if (!gmu->initialized)
                return false;

        val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

        return !(val &
                (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
                A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
        u32 val;

        /* This can be called from gpu state code so make sure GMU is valid */
        if (!gmu->initialized)
                return false;

        val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

        return !(val &
                (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
                A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        u32 perf_index;
        unsigned long gpu_freq;
        int ret = 0;

        gpu_freq = dev_pm_opp_get_freq(opp);

        if (gpu_freq == gmu->freq)
                return;

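        /*
         * Find the perf index for the requested frequency; if there is no
         * exact match, the loop below falls through to the highest available
         * level.
         */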
        for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
                if (gpu_freq == gmu->gpu_freqs[perf_index])
                        break;

        gmu->current_perf_index = perf_index;
        gmu->freq = gmu->gpu_freqs[perf_index];

        trace_msm_gmu_freq_change(gmu->freq, perf_index);

        /*
         * This can get called from devfreq while the hardware is idle. Don't
         * bring up the power if it isn't already active
         */
        if (pm_runtime_get_if_in_use(gmu->dev) == 0)
                return;

        if (!gmu->legacy) {
                a6xx_hfi_set_freq(gmu, perf_index);
                dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
                pm_runtime_put(gmu->dev);
                return;
        }

        gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

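        /*
         * Request the new performance index. The 0x3 in the upper nibble is
         * presumably a vote/ack option understood by the firmware; the
         * register fields are not documented here, so this is inferred from
         * the packing below.
         */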
        gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
                        ((3 & 0xf) << 28) | perf_index);

        /*
         * Send an invalid index as a vote for the bus bandwidth and let the
         * firmware decide on the right vote
         */
        gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

        /* Set and clear the OOB for DCVS to trigger the GMU */
        a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
        a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

        ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
        if (ret)
                dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

        dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
        pm_runtime_put(gmu->dev);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

        return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
        u32 val;
        int local = gmu->idle_level;

        /* SPTP and IFPC both report as IFPC */
        if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
                local = GMU_IDLE_STATE_IFPC;

        val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

        if (val == local) {
                if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
                        !a6xx_gmu_gx_is_on(gmu))
                        return true;
        }

        return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
        return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;
        u32 mask, reset_val;

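        /*
         * A word near the end of the DTCM apparently holds the GMU firmware
         * version: newer firmware signals init through the low bits of
         * FW_INIT_RESULT instead of the 0xbabeface magic (the version split
         * is inferred from the check below).
         */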
        val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
        if (val <= 0x20010004) {
                mask = 0xffffffff;
                reset_val = 0xbabeface;
        } else {
                mask = 0x1ff;
                reset_val = 0x100;
        }

        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

        /* Set the log wptr index
         * note: downstream saves the value in poweroff and restores it here
         */
        gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
                (val & mask) == reset_val, 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

        return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
        u32 val;
        int ret;

        gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
                val & 1, 100, 10000);
        if (ret)
                DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

        return ret;
}

struct a6xx_gmu_oob_bits {
        int set, ack, set_new, ack_new, clear, clear_new;
        const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
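/*
 * Worked example, legacy GPU_SET: a6xx_gmu_set_oob() raises the request by
 * writing bit 16 of HOST2GMU_INTR_SET, polls GMU2HOST_INTR_INFO for the ack
 * in bit 24 and then clears that ack via GMU2HOST_INTR_CLR; later,
 * a6xx_gmu_clear_oob() retires the request by writing the 'clear' index
 * (bit 24) back to HOST2GMU_INTR_SET.
 */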
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
        [GMU_OOB_GPU_SET] = {
                .name = "GPU_SET",
                .set = 16,
                .ack = 24,
                .set_new = 30,
                .ack_new = 31,
                .clear = 24,
                .clear_new = 31,
        },

        [GMU_OOB_PERFCOUNTER_SET] = {
                .name = "PERFCOUNTER",
                .set = 17,
                .ack = 25,
                .set_new = 28,
                .ack_new = 30,
                .clear = 25,
                .clear_new = 29,
        },

        [GMU_OOB_BOOT_SLUMBER] = {
                .name = "BOOT_SLUMBER",
                .set = 22,
                .ack = 30,
                .clear = 30,
        },

        [GMU_OOB_DCVS_SET] = {
                .name = "GPU_DCVS",
                .set = 23,
                .ack = 31,
                .clear = 31,
        },
};

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
        int ret;
        u32 val;
        int request, ack;

        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return -EINVAL;

        if (gmu->legacy) {
                request = a6xx_gmu_oob_bits[state].set;
                ack = a6xx_gmu_oob_bits[state].ack;
        } else {
                request = a6xx_gmu_oob_bits[state].set_new;
                ack = a6xx_gmu_oob_bits[state].ack_new;
                if (!request || !ack) {
                        DRM_DEV_ERROR(gmu->dev,
                                      "Invalid non-legacy GMU request %s\n",
                                      a6xx_gmu_oob_bits[state].name);
                        return -EINVAL;
                }
        }

        /* Trigger the requested OOB operation */
        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

        /* Wait for the acknowledge interrupt */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                val & (1 << ack), 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev,
                        "Timeout waiting for GMU OOB set %s: 0x%x\n",
                                a6xx_gmu_oob_bits[state].name,
                                gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

        /* Clear the acknowledge interrupt */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

        return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
        int bit;

        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return;

        if (gmu->legacy)
                bit = a6xx_gmu_oob_bits[state].clear;
        else
                bit = a6xx_gmu_oob_bits[state].clear_new;

        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        if (!gmu->legacy)
                return 0;

        gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
                (val & 0x38) == 0x28, 1, 100);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
        }

        return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
        u32 val;
        int ret;

        if (!gmu->legacy)
                return;

        /* Make sure retention is on */
        gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

        gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
                (val & 0x04), 100, 10000);

        if (ret)
                DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
        u32 vote;

        /* Let the GMU know we are getting ready for boot */
        gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

        /* Choose the "default" power level as the highest available */
        vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

        gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
        gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

        /* Let the GMU know the boot sequence has started */
        return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
        int ret;

        /* Disable the power counter so the GMU isn't busy */
        gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

        /* Disable SPTP_PC if the CPU is responsible for it */
        if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
                a6xx_sptprac_disable(gmu);

        if (!gmu->legacy) {
                ret = a6xx_hfi_send_prep_slumber(gmu);
                goto out;
        }

        /* Tell the GMU to get ready to slumber */
        gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

        ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
        a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

        if (!ret) {
                /* Check to see if the GMU really did slumber */
                if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
                        != 0x0f) {
                        DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
                        ret = -ETIMEDOUT;
                }
        }

out:
        /* Put fence into allow mode */
        gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
        return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
        /* Wait for the register to finish posting */
        wmb();

        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
                val & (1 << 1), 100, 10000);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
                return ret;
        }

        ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
                !val, 100, 10000);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
                return ret;
        }

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

        /* Set up CX GMU counter 0 to count busy ticks */
        gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
        gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

        /* Enable the power counter */
        gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
        return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
        int ret;
        u32 val;

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

        ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
                val, val & (1 << 16), 100, 10000);
        if (ret)
                DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

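/*
 * PDC and sequencer registers are addressed by dword index; the shift below
 * turns that index into the byte offset expected by msm_writel().
 */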
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
        return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
                const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct platform_device *pdev = to_platform_device(gmu->dev);
        void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
        void __iomem *seqptr = NULL;
        uint32_t pdc_address_offset;
        bool pdc_in_aop = false;

        if (!pdcptr)
                goto err;

        if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
                pdc_in_aop = true;
        else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
                pdc_address_offset = 0x30090;
        else
                pdc_address_offset = 0x30080;

        if (!pdc_in_aop) {
                seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
                if (!seqptr)
                        goto err;
        }

        /* Disable SDE clock gating */
        gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

        /* Setup RSC PDC handshake for sleep and wakeup */
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
        gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

        /* Load RSC sequencer uCode for sleep and wakeup */
        if (adreno_is_a650_family(adreno_gpu)) {
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
        } else {
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
                gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
        }

        if (pdc_in_aop)
                goto setup_pdc;

        /* Load PDC sequencer uCode for power up and power down sequence */
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
        pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

        /* Set TCS commands used by PDC sequence for low power modes */
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
        if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
                pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
        else
                pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

        /* Setup GPU PDC */
setup_pdc:
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
        pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

        /* ensure no writes happen before the uCode is fully written */
        wmb();

err:
        if (!IS_ERR_OR_NULL(pdcptr))
                iounmap(pdcptr);
        if (!IS_ERR_OR_NULL(seqptr))
                iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us).  The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
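/*
 * Worked out, assuming the usual 19.2 MHz XO: 0x1680 = 5760 cycles,
 * 5760 / 19.2 MHz = 300 us, and 0xa = 10 cycles = ~0.52 us.
 */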

#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
        /* Disable GMU WB/RB buffer */
        gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
        gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
        gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

        gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

        switch (gmu->idle_level) {
        case GMU_IDLE_STATE_IFPC:
                gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
                        GMU_PWR_COL_HYST);
                gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
                fallthrough;
        case GMU_IDLE_STATE_SPTP:
                gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
                        GMU_PWR_COL_HYST);
                gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
        }

        /* Enable RPMh GPU client */
        gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
                A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
                A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
                A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

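/*
 * The non-legacy GMU firmware image is a stream of these block headers, each
 * followed by 'size' bytes of payload; a6xx_gmu_fw_load() below walks the
 * stream and copies each block to its destination.
 */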
struct block_header {
        u32 addr;
        u32 size;
        u32 type;
        u32 value;
        u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
        return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
        if (!in_range(blk->addr, bo->iova, bo->size))
                return false;

        memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
        return true;
}

static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
        const struct block_header *blk;
        u32 reg_offset;

        u32 itcm_base = 0x00000000;
        u32 dtcm_base = 0x00040000;

        if (adreno_is_a650_family(adreno_gpu))
                dtcm_base = 0x10004000;

        if (gmu->legacy) {
                /* Sanity check the size of the firmware that was loaded */
                if (fw_image->size > 0x8000) {
                        DRM_DEV_ERROR(gmu->dev,
                                "GMU firmware is bigger than the available region\n");
                        return -EINVAL;
                }

                gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
                               (u32*) fw_image->data, fw_image->size);
                return 0;
        }

        for (blk = (const struct block_header *) fw_image->data;
             (const u8*) blk < fw_image->data + fw_image->size;
             blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
                if (blk->size == 0)
                        continue;

                if (in_range(blk->addr, itcm_base, SZ_16K)) {
                        reg_offset = (blk->addr - itcm_base) >> 2;
                        gmu_write_bulk(gmu,
                                REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
                                blk->data, blk->size);
                } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
                        reg_offset = (blk->addr - dtcm_base) >> 2;
                        gmu_write_bulk(gmu,
                                REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
                                blk->data, blk->size);
                } else if (!fw_block_mem(&gmu->icache, blk) &&
                           !fw_block_mem(&gmu->dcache, blk) &&
                           !fw_block_mem(&gmu->dummy, blk)) {
                        DRM_DEV_ERROR(gmu->dev,
                                "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
                                blk->addr, blk->size, blk->data[0]);
                }
        }

        return 0;
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
        static bool rpmh_init;
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        int ret;
        u32 chipid;

        if (adreno_is_a650_family(adreno_gpu)) {
                gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
                gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
        }

        if (state == GMU_WARM_BOOT) {
                ret = a6xx_rpmh_start(gmu);
                if (ret)
                        return ret;
        } else {
                if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
                        "GMU firmware is not loaded\n"))
                        return -ENOENT;

                /* Turn on register retention */
                gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

                /* We only need to load the RPMh microcode once */
                if (!rpmh_init) {
                        a6xx_gmu_rpmh_init(gmu);
                        rpmh_init = true;
                } else {
                        ret = a6xx_rpmh_start(gmu);
                        if (ret)
                                return ret;
                }

                ret = a6xx_gmu_fw_load(gmu);
                if (ret)
                        return ret;
        }

        gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
        gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

        /* Write the iova of the HFI table */
        gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
        gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

        gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
                (1 << 31) | (0xa << 18) | (0xa0));

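        /*
         * Pack the chip id for the GMU: core in bits 31:24, major in 23:16,
         * minor in 15:12 and patchid in 11:8, per the shifts below.
         */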
        chipid = adreno_gpu->rev.core << 24;
        chipid |= adreno_gpu->rev.major << 16;
        chipid |= adreno_gpu->rev.minor << 12;
        chipid |= adreno_gpu->rev.patchid << 8;

        gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

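        /*
         * The log buffer iova is page aligned, so the low bits are free to
         * carry the buffer size in 4K pages minus one.
         */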
        gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
                  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

        /* Set up the lowest idle level on the GMU */
        a6xx_gmu_power_config(gmu);

        ret = a6xx_gmu_start(gmu);
        if (ret)
                return ret;

        if (gmu->legacy) {
                ret = a6xx_gmu_gfx_rail_on(gmu);
                if (ret)
                        return ret;
        }

        /* Enable SPTP_PC if the CPU is responsible for it */
        if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
                ret = a6xx_sptprac_enable(gmu);
                if (ret)
                        return ret;
        }

        ret = a6xx_gmu_hfi_start(gmu);
        if (ret)
                return ret;

        /* FIXME: Do we need this wmb() here? */
        wmb();

        return 0;
}

#define A6XX_HFI_IRQ_MASK \
        (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
        (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
         A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
         A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
        disable_irq(gmu->gmu_irq);
        disable_irq(gmu->hfi_irq);

        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
        u32 val;

        /* Make sure there are no outstanding RPMh votes */
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
                (val & 1), 100, 10000);
        gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
                (val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
        /* Flush all the queues */
        a6xx_hfi_stop(gmu);

        /* Stop the interrupts */
        a6xx_gmu_irq_disable(gmu);

        /* Force off SPTP in case the GMU is managing it */
        a6xx_sptprac_disable(gmu);

        /* Make sure there are no outstanding RPMh votes */
        a6xx_gmu_rpmh_off(gmu);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
        struct dev_pm_opp *gpu_opp;
        unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

        gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
        if (IS_ERR_OR_NULL(gpu_opp))
                return;

        gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
        a6xx_gmu_set_freq(gpu, gpu_opp);
        dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
        struct dev_pm_opp *gpu_opp;
        unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

        gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
        if (IS_ERR_OR_NULL(gpu_opp))
                return;

        dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
        dev_pm_opp_put(gpu_opp);
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        int status, ret;

        if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
                return 0;

        gmu->hung = false;

        /* Turn on the resources */
        pm_runtime_get_sync(gmu->dev);

        /*
         * "enable" the GX power domain which won't actually do anything but it
         * will make sure that the refcounting is correct in case we need to
         * bring down the GX after a GMU failure
         */
        if (!IS_ERR_OR_NULL(gmu->gxpd))
                pm_runtime_get_sync(gmu->gxpd);

        /* Use a known rate to bring up the GMU */
        clk_set_rate(gmu->core_clk, 200000000);
        ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
        if (ret) {
                pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
                return ret;
        }

        /* Set the bus quota to a reasonable value for boot */
        a6xx_gmu_set_initial_bw(gpu, gmu);

        /* Enable the GMU interrupt */
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
        enable_irq(gmu->gmu_irq);

        /* Check to see if we are doing a cold or warm boot */
        status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
                GMU_WARM_BOOT : GMU_COLD_BOOT;

        /*
         * Warm boot path does not work on newer GPUs
         * Presumably this is because icache/dcache regions must be restored
         */
        if (!gmu->legacy)
                status = GMU_COLD_BOOT;

        ret = a6xx_gmu_fw_start(gmu, status);
        if (ret)
                goto out;

        ret = a6xx_hfi_start(gmu, status);
        if (ret)
                goto out;

        /*
         * Turn on the GMU firmware fault interrupt after we know the boot
         * sequence is successful
         */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
        enable_irq(gmu->hfi_irq);

        /* Set the GPU to the current freq */
        a6xx_gmu_set_initial_freq(gpu, gmu);

out:
        /* On failure, shut down the GMU to leave it in a good state */
        if (ret) {
                disable_irq(gmu->gmu_irq);
                a6xx_rpmh_stop(gmu);
                pm_runtime_put(gmu->gxpd);
                pm_runtime_put(gmu->dev);
        }

        return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
        u32 reg;

        if (!gmu->initialized)
                return true;

        reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

        if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
                return false;

        return true;
}

#define GBIF_CLIENT_HALT_MASK             BIT(0)
#define GBIF_ARB_HALT_MASK                BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
        struct msm_gpu *gpu = &adreno_gpu->base;

        if (!a6xx_has_gbif(adreno_gpu)) {
                gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
                spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
                                                                0xf) == 0xf);
                gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

                return;
        }

        /* Halt new client requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
        spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
                        (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

        /* Halt all AXI requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
        spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
                        (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

        /* The GBIF halt needs to be explicitly cleared */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}

/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        u32 val;

        /*
         * The GMU may still be in slumber unless the GPU started so check and
         * skip putting it back into slumber if so
         */
        val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

        if (val != 0xf) {
                int ret = a6xx_gmu_wait_for_idle(gmu);

                /* If the GMU isn't responding assume it is hung */
                if (ret) {
                        a6xx_gmu_force_off(gmu);
                        return;
                }

                a6xx_bus_clear_pending_transactions(adreno_gpu);

                /* tell the GMU we want to slumber */
                a6xx_gmu_notify_slumber(gmu);

                ret = gmu_poll_timeout(gmu,
                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
                        !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
                        100, 10000);

                /*
                 * Let the user know we failed to slumber but don't worry too
                 * much because we are powering down anyway
                 */

                if (ret)
                        DRM_DEV_ERROR(gmu->dev,
                                "Unable to slumber GMU: status = 0%x/0%x\n",
                                gmu_read(gmu,
                                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
                                gmu_read(gmu,
                                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
        }

        /* Turn off HFI */
        a6xx_hfi_stop(gmu);

        /* Stop the interrupts and mask the hardware */
        a6xx_gmu_irq_disable(gmu);

        /* Tell RPMh to power off the GPU */
        a6xx_rpmh_stop(gmu);
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct msm_gpu *gpu = &a6xx_gpu->base.base;

        if (!pm_runtime_active(gmu->dev))
                return 0;

        /*
         * Force the GMU off if we detected a hang, otherwise try to shut it
         * down gracefully
         */
        if (gmu->hung)
                a6xx_gmu_force_off(gmu);
        else
                a6xx_gmu_shutdown(gmu);

        /* Remove the bus vote */
        dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

        /*
         * Make sure the GX domain is off before turning off the GMU (CX)
         * domain. Usually the GMU does this but only if the shutdown sequence
         * was successful
         */
        if (!IS_ERR_OR_NULL(gmu->gxpd))
                pm_runtime_put_sync(gmu->gxpd);

        clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

        pm_runtime_put_sync(gmu->dev);

        return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
        msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
        msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);

        gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
        msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
                size_t size, u64 iova)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct drm_device *dev = a6xx_gpu->base.base.dev;
        uint32_t flags = MSM_BO_WC;
        u64 range_start, range_end;
        int ret;

        size = PAGE_ALIGN(size);
        if (!iova) {
                /* no fixed address - use GMU's uncached range */
                range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
                range_end = 0x80000000;
        } else {
                /* range for fixed address */
                range_start = iova;
                range_end = iova + size;
                /* use IOMMU_PRIV for icache/dcache */
                flags |= MSM_BO_MAP_PRIV;
        }

        bo->obj = msm_gem_new(dev, size, flags);
        if (IS_ERR(bo->obj))
                return PTR_ERR(bo->obj);

        ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
                range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
        if (ret) {
                drm_gem_object_put(bo->obj);
                return ret;
        }

        bo->virt = msm_gem_get_vaddr(bo->obj);
        bo->size = size;

        return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
        struct iommu_domain *domain;
        struct msm_mmu *mmu;

        domain = iommu_domain_alloc(&platform_bus_type);
        if (!domain)
                return -ENODEV;

        mmu = msm_iommu_new(gmu->dev, domain);
        gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
        if (IS_ERR(gmu->aspace)) {
                iommu_domain_free(domain);
                return PTR_ERR(gmu->aspace);
        }

        return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
                                           unsigned long freq)
{
        struct dev_pm_opp *opp;
        unsigned int val;

        if (!freq)
                return 0;

        opp = dev_pm_opp_find_freq_exact(dev, freq, true);
        if (IS_ERR(opp))
                return 0;

        val = dev_pm_opp_get_level(opp);

        dev_pm_opp_put(opp);

        return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
                unsigned long *freqs, int freqs_count, const char *id)
{
        int i, j;
        const u16 *pri, *sec;
        size_t pri_count, sec_count;

        pri = cmd_db_read_aux_data(id, &pri_count);
        if (IS_ERR(pri))
                return PTR_ERR(pri);
        /*
         * The data comes back as an array of unsigned shorts so adjust the
         * count accordingly
         */
        pri_count >>= 1;
        if (!pri_count)
                return -EINVAL;

        sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
        if (IS_ERR(sec))
                return PTR_ERR(sec);

        sec_count >>= 1;
        if (!sec_count)
                return -EINVAL;

        /* Construct a vote for each frequency */
        for (i = 0; i < freqs_count; i++) {
                u8 pindex = 0, sindex = 0;
                unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

                /* Get the primary index that matches the arc level */
                for (j = 0; j < pri_count; j++) {
                        if (pri[j] >= level) {
                                pindex = j;
                                break;
                        }
                }

                if (j == pri_count) {
                        DRM_DEV_ERROR(dev,
                                      "Level %u not found in the RPMh list\n",
                                      level);
                        DRM_DEV_ERROR(dev, "Available levels:\n");
                        for (j = 0; j < pri_count; j++)
                                DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

                        return -EINVAL;
                }

                /*
                 * Look for a level in the secondary list that matches. If
                 * nothing fits, use the maximum non zero vote
                 */

                for (j = 0; j < sec_count; j++) {
                        if (sec[j] >= level) {
                                sindex = j;
                                break;
                        } else if (sec[j]) {
                                sindex = j;
                        }
                }

                /* Construct the vote */
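                /*
                 * Layout, per the packing below: bits 31:16 carry the primary
                 * arc level value, bits 15:8 the secondary (mx) index and
                 * bits 7:0 the primary index.
                 */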
                votes[i] = ((pri[pindex] & 0xffff) << 16) |
                        (sindex << 8) | pindex;
        }

        return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */

static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
        int ret;

        /* Build the GX votes */
        ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
                gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

        /* Build the CX votes */
        ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
                gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

        return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
                u32 size)
{
        int count = dev_pm_opp_get_opp_count(dev);
        struct dev_pm_opp *opp;
        int i, index = 0;
        unsigned long freq = 1;

        /*
         * The OPP table doesn't contain the "off" frequency level so we need to
         * add 1 to the table size to account for it
         */

        if (WARN(count + 1 > size,
                "The GMU frequency table is being truncated\n"))
                count = size - 1;

        /* Set the "off" frequency */
        freqs[index++] = 0;

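        /*
         * dev_pm_opp_find_freq_ceil() rounds 'freq' up to the next available
         * OPP and updates it in place, so bumping it by one after each hit
         * walks the table in ascending order.
         */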
        for (i = 0; i < count; i++) {
                opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;

                dev_pm_opp_put(opp);
                freqs[index++] = freq++;
        }

        return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;

        int ret = 0;

        /*
         * The GMU handles its own frequency switching so build a list of
         * available frequencies to send during initialization
         */
        ret = devm_pm_opp_of_add_table(gmu->dev);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
                return ret;
        }

        gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
                gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

        /*
         * The GMU also handles GPU frequency switching so build a list
         * from the GPU OPP table
         */
        gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
                gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

        gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

        /* Build the list of RPMh votes that we'll send to the GMU */
        return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
        int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

        if (ret < 1)
                return ret;

        gmu->nr_clocks = ret;

        gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
                gmu->nr_clocks, "gmu");

        return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
                const char *name)
{
        void __iomem *ret;
        struct resource *res = platform_get_resource_byname(pdev,
                        IORESOURCE_MEM, name);

        if (!res) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }

        ret = ioremap(res->start, resource_size(res));
        if (!ret) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }

        return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
                const char *name, irq_handler_t handler)
{
        int irq, ret;

        irq = platform_get_irq_byname(pdev, name);

        ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
                              name, ret);
                return ret;
        }

        disable_irq(irq);

        return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct platform_device *pdev = to_platform_device(gmu->dev);

        if (!gmu->initialized)
                return;

        pm_runtime_force_suspend(gmu->dev);

        if (!IS_ERR_OR_NULL(gmu->gxpd)) {
                pm_runtime_disable(gmu->gxpd);
                dev_pm_domain_detach(gmu->gxpd, false);
        }

        iounmap(gmu->mmio);
        if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
                iounmap(gmu->rscc);
        gmu->mmio = NULL;
        gmu->rscc = NULL;

        a6xx_gmu_memory_free(gmu);

        free_irq(gmu->gmu_irq, gmu);
        free_irq(gmu->hfi_irq, gmu);

        /* Drop reference taken in of_find_device_by_node */
        put_device(gmu->dev);

        gmu->initialized = false;
}

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        struct platform_device *pdev = of_find_device_by_node(node);
        int ret;

        if (!pdev)
                return -ENODEV;

        gmu->dev = &pdev->dev;

        of_dma_configure(gmu->dev, node, true);

        /* For now, don't do anything fancy until we get our feet under us */
        gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

        pm_runtime_enable(gmu->dev);

        /* Get the list of clocks */
        ret = a6xx_gmu_clocks_probe(gmu);
        if (ret)
                goto err_put_device;

        ret = a6xx_gmu_memory_probe(gmu);
        if (ret)
                goto err_put_device;

        /* A660 now requires handling "prealloc requests" in GMU firmware
         * For now just hardcode allocations based on the known firmware.
         * note: there is no indication that these correspond to "dummy" or
         * "debug" regions, but this "guess" allows reusing these BOs which
         * are otherwise unused by a660.
         */
        gmu->dummy.size = SZ_4K;
        if (adreno_is_a660(adreno_gpu)) {
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
                if (ret)
                        goto err_memory;

                gmu->dummy.size = SZ_8K;
        }

        /* Allocate memory for the GMU dummy page */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
        if (ret)
                goto err_memory;

        if (adreno_is_a650_family(adreno_gpu)) {
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
                        SZ_16M - SZ_16K, 0x04000);
                if (ret)
                        goto err_memory;
        } else if (adreno_is_a640(adreno_gpu)) {
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
                        SZ_256K - SZ_16K, 0x04000);
                if (ret)
                        goto err_memory;

                ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
                        SZ_256K - SZ_16K, 0x44000);
                if (ret)
                        goto err_memory;
        } else {
                /* HFI v1, has sptprac */
                gmu->legacy = true;

                /* Allocate memory for the GMU debug region */
                ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
                if (ret)
                        goto err_memory;
        }

        /* Allocate memory for the HFI queues */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
        if (ret)
                goto err_memory;

        /* Allocate memory for the GMU log region */
        ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
        if (ret)
                goto err_memory;

        /* Map the GMU registers */
        gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
        if (IS_ERR(gmu->mmio)) {
                ret = PTR_ERR(gmu->mmio);
                goto err_memory;
        }

        if (adreno_is_a650_family(adreno_gpu)) {
                gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
                if (IS_ERR(gmu->rscc))
                        goto err_mmio;
        } else {
                gmu->rscc = gmu->mmio + 0x23000;
        }

        /* Get the HFI and GMU interrupts */
        gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
        gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

        if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
                goto err_mmio;

        /*
         * Get a link to the GX power domain to reset the GPU in case of GMU
         * crash
         */
        gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

        /* Get the power levels for the GMU and GPU */
        a6xx_gmu_pwrlevels_probe(gmu);

        /* Set up the HFI queues */
        a6xx_hfi_init(gmu);

        gmu->initialized = true;

        return 0;

err_mmio:
        iounmap(gmu->mmio);
        if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
                iounmap(gmu->rscc);
        free_irq(gmu->gmu_irq, gmu);
        free_irq(gmu->hfi_irq, gmu);

        ret = -ENODEV;

err_memory:
        a6xx_gmu_memory_free(gmu);
err_put_device:
        /* Drop reference taken in of_find_device_by_node */
        put_device(gmu->dev);

        return ret;
}