drm/etnaviv: show identity information in debugfs
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015-2018 Etnaviv Project
4  */
5
6 #include <linux/clk.h>
7 #include <linux/component.h>
8 #include <linux/delay.h>
9 #include <linux/dma-fence.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/module.h>
12 #include <linux/of_device.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/regulator/consumer.h>
16 #include <linux/thermal.h>
17
18 #include "etnaviv_cmdbuf.h"
19 #include "etnaviv_dump.h"
20 #include "etnaviv_gpu.h"
21 #include "etnaviv_gem.h"
22 #include "etnaviv_mmu.h"
23 #include "etnaviv_perfmon.h"
24 #include "etnaviv_sched.h"
25 #include "common.xml.h"
26 #include "state.xml.h"
27 #include "state_hi.xml.h"
28 #include "cmdstream.xml.h"
29
30 #ifndef PHYS_OFFSET
31 #define PHYS_OFFSET 0
32 #endif
33
34 static const struct platform_device_id gpu_ids[] = {
35         { .name = "etnaviv-gpu,2d" },
36         { },
37 };
38
39 /*
40  * Driver functions:
41  */
42
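/*
 * Report a single identity/feature value to userspace.  This backs the
 * ETNAVIV_GET_PARAM ioctl, which userspace (e.g. the Mesa etnaviv driver)
 * uses to discover the core's model, revision, feature bits and hardware
 * limits before it builds command streams.
 */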
43 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
44 {
45         struct etnaviv_drm_private *priv = gpu->drm->dev_private;
46
47         switch (param) {
48         case ETNAVIV_PARAM_GPU_MODEL:
49                 *value = gpu->identity.model;
50                 break;
51
52         case ETNAVIV_PARAM_GPU_REVISION:
53                 *value = gpu->identity.revision;
54                 break;
55
56         case ETNAVIV_PARAM_GPU_FEATURES_0:
57                 *value = gpu->identity.features;
58                 break;
59
60         case ETNAVIV_PARAM_GPU_FEATURES_1:
61                 *value = gpu->identity.minor_features0;
62                 break;
63
64         case ETNAVIV_PARAM_GPU_FEATURES_2:
65                 *value = gpu->identity.minor_features1;
66                 break;
67
68         case ETNAVIV_PARAM_GPU_FEATURES_3:
69                 *value = gpu->identity.minor_features2;
70                 break;
71
72         case ETNAVIV_PARAM_GPU_FEATURES_4:
73                 *value = gpu->identity.minor_features3;
74                 break;
75
76         case ETNAVIV_PARAM_GPU_FEATURES_5:
77                 *value = gpu->identity.minor_features4;
78                 break;
79
80         case ETNAVIV_PARAM_GPU_FEATURES_6:
81                 *value = gpu->identity.minor_features5;
82                 break;
83
84         case ETNAVIV_PARAM_GPU_FEATURES_7:
85                 *value = gpu->identity.minor_features6;
86                 break;
87
88         case ETNAVIV_PARAM_GPU_FEATURES_8:
89                 *value = gpu->identity.minor_features7;
90                 break;
91
92         case ETNAVIV_PARAM_GPU_FEATURES_9:
93                 *value = gpu->identity.minor_features8;
94                 break;
95
96         case ETNAVIV_PARAM_GPU_FEATURES_10:
97                 *value = gpu->identity.minor_features9;
98                 break;
99
100         case ETNAVIV_PARAM_GPU_FEATURES_11:
101                 *value = gpu->identity.minor_features10;
102                 break;
103
104         case ETNAVIV_PARAM_GPU_FEATURES_12:
105                 *value = gpu->identity.minor_features11;
106                 break;
107
108         case ETNAVIV_PARAM_GPU_STREAM_COUNT:
109                 *value = gpu->identity.stream_count;
110                 break;
111
112         case ETNAVIV_PARAM_GPU_REGISTER_MAX:
113                 *value = gpu->identity.register_max;
114                 break;
115
116         case ETNAVIV_PARAM_GPU_THREAD_COUNT:
117                 *value = gpu->identity.thread_count;
118                 break;
119
120         case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
121                 *value = gpu->identity.vertex_cache_size;
122                 break;
123
124         case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
125                 *value = gpu->identity.shader_core_count;
126                 break;
127
128         case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
129                 *value = gpu->identity.pixel_pipes;
130                 break;
131
132         case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
133                 *value = gpu->identity.vertex_output_buffer_size;
134                 break;
135
136         case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
137                 *value = gpu->identity.buffer_size;
138                 break;
139
140         case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
141                 *value = gpu->identity.instruction_count;
142                 break;
143
144         case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
145                 *value = gpu->identity.num_constants;
146                 break;
147
148         case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
149                 *value = gpu->identity.varyings_count;
150                 break;
151
152         case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
153                 if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
154                         *value = ETNAVIV_SOFTPIN_START_ADDRESS;
155                 else
156                         *value = ~0ULL;
157                 break;
158
159         default:
160                 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
161                 return -EINVAL;
162         }
163
164         return 0;
165 }
166
167
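/*
 * Small helpers used during identification: etnaviv_is_model_rev() matches
 * an exact model/revision pair, and etnaviv_field() extracts a bitfield
 * from a register value using the __MASK/__SHIFT pairs from the generated
 * register headers.
 */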
168 #define etnaviv_is_model_rev(gpu, mod, rev) \
169         ((gpu)->identity.model == chipModel_##mod && \
170          (gpu)->identity.revision == rev)
171 #define etnaviv_field(val, field) \
172         (((val) & field##__MASK) >> field##__SHIFT)
173
174 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
175 {
176         if (gpu->identity.minor_features0 &
177             chipMinorFeatures0_MORE_MINOR_FEATURES) {
178                 u32 specs[4];
179                 unsigned int streams;
180
181                 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
182                 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
183                 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
184                 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
185
186                 gpu->identity.stream_count = etnaviv_field(specs[0],
187                                         VIVS_HI_CHIP_SPECS_STREAM_COUNT);
188                 gpu->identity.register_max = etnaviv_field(specs[0],
189                                         VIVS_HI_CHIP_SPECS_REGISTER_MAX);
190                 gpu->identity.thread_count = etnaviv_field(specs[0],
191                                         VIVS_HI_CHIP_SPECS_THREAD_COUNT);
192                 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
193                                         VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
194                 gpu->identity.shader_core_count = etnaviv_field(specs[0],
195                                         VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
196                 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
197                                         VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
198                 gpu->identity.vertex_output_buffer_size =
199                         etnaviv_field(specs[0],
200                                 VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
201
202                 gpu->identity.buffer_size = etnaviv_field(specs[1],
203                                         VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
204                 gpu->identity.instruction_count = etnaviv_field(specs[1],
205                                         VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
206                 gpu->identity.num_constants = etnaviv_field(specs[1],
207                                         VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
208
209                 gpu->identity.varyings_count = etnaviv_field(specs[2],
210                                         VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
211
212                 /* This overrides the value from the older register if non-zero */
213                 streams = etnaviv_field(specs[3],
214                                         VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
215                 if (streams)
216                         gpu->identity.stream_count = streams;
217         }
218
219         /* Fill in the stream count if not specified */
220         if (gpu->identity.stream_count == 0) {
221                 if (gpu->identity.model >= 0x1000)
222                         gpu->identity.stream_count = 4;
223                 else
224                         gpu->identity.stream_count = 1;
225         }
226
227         /* Convert the register max value */
228         if (gpu->identity.register_max)
229                 gpu->identity.register_max = 1 << gpu->identity.register_max;
230         else if (gpu->identity.model == chipModel_GC400)
231                 gpu->identity.register_max = 32;
232         else
233                 gpu->identity.register_max = 64;
234
235         /* Convert thread count */
236         if (gpu->identity.thread_count)
237                 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
238         else if (gpu->identity.model == chipModel_GC400)
239                 gpu->identity.thread_count = 64;
240         else if (gpu->identity.model == chipModel_GC500 ||
241                  gpu->identity.model == chipModel_GC530)
242                 gpu->identity.thread_count = 128;
243         else
244                 gpu->identity.thread_count = 256;
245
246         if (gpu->identity.vertex_cache_size == 0)
247                 gpu->identity.vertex_cache_size = 8;
248
249         if (gpu->identity.shader_core_count == 0) {
250                 if (gpu->identity.model >= 0x1000)
251                         gpu->identity.shader_core_count = 2;
252                 else
253                         gpu->identity.shader_core_count = 1;
254         }
255
256         if (gpu->identity.pixel_pipes == 0)
257                 gpu->identity.pixel_pipes = 1;
258
259         /* Convert the vertex output buffer size */
260         if (gpu->identity.vertex_output_buffer_size) {
261                 gpu->identity.vertex_output_buffer_size =
262                         1 << gpu->identity.vertex_output_buffer_size;
263         } else if (gpu->identity.model == chipModel_GC400) {
264                 if (gpu->identity.revision < 0x4000)
265                         gpu->identity.vertex_output_buffer_size = 512;
266                 else if (gpu->identity.revision < 0x4200)
267                         gpu->identity.vertex_output_buffer_size = 256;
268                 else
269                         gpu->identity.vertex_output_buffer_size = 128;
270         } else {
271                 gpu->identity.vertex_output_buffer_size = 512;
272         }
273
274         switch (gpu->identity.instruction_count) {
275         case 0:
276                 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
277                     gpu->identity.model == chipModel_GC880)
278                         gpu->identity.instruction_count = 512;
279                 else
280                         gpu->identity.instruction_count = 256;
281                 break;
282
283         case 1:
284                 gpu->identity.instruction_count = 1024;
285                 break;
286
287         case 2:
288                 gpu->identity.instruction_count = 2048;
289                 break;
290
291         default:
292                 gpu->identity.instruction_count = 256;
293                 break;
294         }
295
296         if (gpu->identity.num_constants == 0)
297                 gpu->identity.num_constants = 168;
298
299         if (gpu->identity.varyings_count == 0) {
300                 if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
301                         gpu->identity.varyings_count = 12;
302                 else
303                         gpu->identity.varyings_count = 8;
304         }
305
306         /*
307          * For some cores, two varyings are consumed for position, so the
308          * maximum varying count needs to be reduced by one.
309          */
310         if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
311             etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
312             etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
313             etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
314             etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
315             etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
316             etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
317             etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
318             etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
319             etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
320             etnaviv_is_model_rev(gpu, GC880, 0x5106))
321                 gpu->identity.varyings_count -= 1;
322 }
323
324 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
325 {
326         u32 chipIdentity;
327
328         chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
329
330         /* Special case for older graphics cores. */
331         if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
332                 gpu->identity.model    = chipModel_GC500;
333                 gpu->identity.revision = etnaviv_field(chipIdentity,
334                                          VIVS_HI_CHIP_IDENTITY_REVISION);
335         } else {
336                 u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
337
338                 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
339                 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
340                 gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
341                 gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
342                 gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
343
344                 /*
345                  * !!!! HACK ALERT !!!!
346                  * Because people change device IDs without letting software
347                  * know about it - here is the hack to make it all look the
348                  * same.  Only for GC400 family.
349                  */
350                 if ((gpu->identity.model & 0xff00) == 0x0400 &&
351                     gpu->identity.model != chipModel_GC420) {
352                         gpu->identity.model = gpu->identity.model & 0x0400;
353                 }
354
355                 /* Another special case */
356                 if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
357                         u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
358
359                         if (chipDate == 0x20080814 && chipTime == 0x12051100) {
360                                 /*
361                                  * This IP has an ECO; put the correct
362                                  * revision in it.
363                                  */
364                                 gpu->identity.revision = 0x1051;
365                         }
366                 }
367
368                 /*
369                  * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
370                  * reality it's just a re-branded GC3000. We can identify this
371                  * core by the upper half of the revision register being all 1.
372                  * Fix model/rev here, so all other places can refer to this
373                  * core by its real identity.
374                  */
375                 if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
376                         gpu->identity.model = chipModel_GC3000;
377                         gpu->identity.revision &= 0xffff;
378                 }
379
380                 if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
381                         gpu->identity.eco_id = 1;
382
383                 if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
384                         gpu->identity.eco_id = 1;
385         }
386
387         dev_info(gpu->dev, "model: GC%x, revision: %x\n",
388                  gpu->identity.model, gpu->identity.revision);
389
390         gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
391         /*
392          * If there is a match in the HWDB, we aren't interested in the
393          * remaining register values, as they might be wrong.
394          */
395         if (etnaviv_fill_identity_from_hwdb(gpu))
396                 return;
397
398         gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
399
400         /* Disable fast clear on GC700. */
401         if (gpu->identity.model == chipModel_GC700)
402                 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
403
404         if ((gpu->identity.model == chipModel_GC500 &&
405              gpu->identity.revision < 2) ||
406             (gpu->identity.model == chipModel_GC300 &&
407              gpu->identity.revision < 0x2000)) {
408
409                 /*
410                  * GC500 rev 1.x and GC300 rev < 2.0 don't have these
411                  * registers.
412                  */
413                 gpu->identity.minor_features0 = 0;
414                 gpu->identity.minor_features1 = 0;
415                 gpu->identity.minor_features2 = 0;
416                 gpu->identity.minor_features3 = 0;
417                 gpu->identity.minor_features4 = 0;
418                 gpu->identity.minor_features5 = 0;
419         } else
420                 gpu->identity.minor_features0 =
421                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
422
423         if (gpu->identity.minor_features0 &
424             chipMinorFeatures0_MORE_MINOR_FEATURES) {
425                 gpu->identity.minor_features1 =
426                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
427                 gpu->identity.minor_features2 =
428                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
429                 gpu->identity.minor_features3 =
430                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
431                 gpu->identity.minor_features4 =
432                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
433                 gpu->identity.minor_features5 =
434                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
435         }
436
437         /* GC600 idle register reports zero bits where modules aren't present */
438         if (gpu->identity.model == chipModel_GC600)
439                 gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
440                                  VIVS_HI_IDLE_STATE_RA |
441                                  VIVS_HI_IDLE_STATE_SE |
442                                  VIVS_HI_IDLE_STATE_PA |
443                                  VIVS_HI_IDLE_STATE_SH |
444                                  VIVS_HI_IDLE_STATE_PE |
445                                  VIVS_HI_IDLE_STATE_DE |
446                                  VIVS_HI_IDLE_STATE_FE;
447
448         etnaviv_hw_specs(gpu);
449 }
450
451 static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
452 {
453         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
454                   VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
455         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
456 }
457
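/*
 * Apply the current gpu->freq_scale setting: each step halves the effective
 * GPU clock, either by halving the core/shader clock rates directly (when
 * dynamic frequency scaling is supported) or by halving the FSCALE value in
 * VIVS_HI_CLOCK_CONTROL, where 64 (freq_scale == 0) leaves the clock
 * unscaled.
 */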
458 static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
459 {
460         if (gpu->identity.minor_features2 &
461             chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
462                 clk_set_rate(gpu->clk_core,
463                              gpu->base_rate_core >> gpu->freq_scale);
464                 clk_set_rate(gpu->clk_shader,
465                              gpu->base_rate_shader >> gpu->freq_scale);
466         } else {
467                 unsigned int fscale = 1 << (6 - gpu->freq_scale);
468                 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
469
470                 clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
471                 clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
472                 etnaviv_gpu_load_clock(gpu, clock);
473         }
474 }
475
476 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
477 {
478         u32 control, idle;
479         unsigned long timeout;
480         bool failed = true;
481
482         /* We hope that the GPU resets in under one second */
483         timeout = jiffies + msecs_to_jiffies(1000);
484
485         while (time_is_after_jiffies(timeout)) {
486                 /* enable clock */
487                 unsigned int fscale = 1 << (6 - gpu->freq_scale);
488                 control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
489                 etnaviv_gpu_load_clock(gpu, control);
490
491                 /* isolate the GPU. */
492                 control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
493                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
494
495                 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
496                         gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
497                                   VIVS_MMUv2_AHB_CONTROL_RESET);
498                 } else {
499                         /* set soft reset. */
500                         control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
501                         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
502                 }
503
504                 /* wait for reset. */
505                 usleep_range(10, 20);
506
507                 /* reset soft reset bit. */
508                 control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
509                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
510
511                 /* reset GPU isolation. */
512                 control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
513                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
514
515                 /* read idle register. */
516                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
517
518                 /* try resetting again if the FE is not idle */
519                 if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
520                         dev_dbg(gpu->dev, "FE is not idle\n");
521                         continue;
522                 }
523
524                 /* read reset register. */
525                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
526
527                 /* is the GPU idle? */
528                 if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
529                     ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
530                         dev_dbg(gpu->dev, "GPU is not idle\n");
531                         continue;
532                 }
533
534                 /* disable debug registers, as they are not normally needed */
535                 control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
536                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
537
538                 failed = false;
539                 break;
540         }
541
542         if (failed) {
543                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
544                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
545
546                 dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
547                         idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
548                         control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
549                         control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
550
551                 return -EBUSY;
552         }
553
554         /* We rely on the GPU running, so program the clock */
555         etnaviv_gpu_update_clock(gpu);
556
557         return 0;
558 }
559
560 static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
561 {
562         u32 pmc, ppc;
563
564         /* enable clock gating */
565         ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
566         ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
567
568         /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
569         if (gpu->identity.revision == 0x4301 ||
570             gpu->identity.revision == 0x4302)
571                 ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
572
573         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
574
575         pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
576
577         /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
578         if (gpu->identity.model >= chipModel_GC400 &&
579             gpu->identity.model != chipModel_GC420 &&
580             !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
581                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
582
583         /*
584          * Disable PE clock gating on revs < 5.0.0.0 when HZ is
585          * present without a bug fix.
586          */
587         if (gpu->identity.revision < 0x5000 &&
588             gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
589             !(gpu->identity.minor_features1 &
590               chipMinorFeatures1_DISABLE_PE_GATING))
591                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
592
593         if (gpu->identity.revision < 0x5422)
594                 pmc |= BIT(15); /* Unknown bit */
595
596         /* Disable TX clock gating on affected core revisions. */
597         if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
598             etnaviv_is_model_rev(gpu, GC2000, 0x5108))
599                 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
600
601         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
602         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
603
604         gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
605 }
606
607 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
608 {
609         gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
610         gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
611                   VIVS_FE_COMMAND_CONTROL_ENABLE |
612                   VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
613
614         if (gpu->sec_mode == ETNA_SEC_KERNEL) {
615                 gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
616                           VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
617                           VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
618         }
619 }
620
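/*
 * Point the front end at the kernel ring buffer.  etnaviv_buffer_init()
 * sets up a WAIT/LINK loop in gpu->buffer, so from here on the FE keeps
 * spinning in that loop until etnaviv_buffer_queue() links in new command
 * streams.
 */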
621 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
622 {
623         u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
624                                 &gpu->mmu_context->cmdbuf_mapping);
625         u16 prefetch;
626
627         /* setup the MMU */
628         etnaviv_iommu_restore(gpu, gpu->mmu_context);
629
630         /* Start command processor */
631         prefetch = etnaviv_buffer_init(gpu);
632
633         etnaviv_gpu_start_fe(gpu, address, prefetch);
634 }
635
636 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
637 {
638         /*
639          * Base value for VIVS_PM_PULSE_EATER register on models where it
640          * cannot be read, extracted from the Vivante kernel driver.
641          */
642         u32 pulse_eater = 0x01590880;
643
644         if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
645             etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
646                 pulse_eater |= BIT(23);
647
648         }
649
650         if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
651             etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
652                 pulse_eater &= ~BIT(16);
653                 pulse_eater |= BIT(17);
654         }
655
656         if ((gpu->identity.revision > 0x5420) &&
657             (gpu->identity.features & chipFeatures_PIPE_3D))
658         {
659                 /* Performance fix: disable internal DFS */
660                 pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
661                 pulse_eater |= BIT(18);
662         }
663
664         gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
665 }
666
667 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
668 {
669         if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
670              etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
671             gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
672                 u32 mc_memory_debug;
673
674                 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
675
676                 if (gpu->identity.revision == 0x5007)
677                         mc_memory_debug |= 0x0c;
678                 else
679                         mc_memory_debug |= 0x08;
680
681                 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
682         }
683
684         /* enable module-level clock gating */
685         etnaviv_gpu_enable_mlcg(gpu);
686
687         /*
688          * Update the GPU AXI cache attributes to "cacheable, no allocate".
689          * This is necessary to prevent the i.MX6 SoC from locking up.
690          */
691         gpu_write(gpu, VIVS_HI_AXI_CONFIG,
692                   VIVS_HI_AXI_CONFIG_AWCACHE(2) |
693                   VIVS_HI_AXI_CONFIG_ARCACHE(2));
694
695         /* GC2000 rev 5108 needs a special bus config */
696         if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
697                 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
698                 bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
699                                 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
700                 bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
701                               VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
702                 gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
703         }
704
705         if (gpu->sec_mode == ETNA_SEC_KERNEL) {
706                 u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
707                 val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
708                 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
709         }
710
711         /* setup the pulse eater */
712         etnaviv_gpu_setup_pulse_eater(gpu);
713
714         gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
715 }
716
717 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
718 {
719         struct etnaviv_drm_private *priv = gpu->drm->dev_private;
720         int ret, i;
721
722         ret = pm_runtime_get_sync(gpu->dev);
723         if (ret < 0) {
724                 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
725                 return ret;
726         }
727
728         etnaviv_hw_identify(gpu);
729
730         if (gpu->identity.model == 0) {
731                 dev_err(gpu->dev, "Unknown GPU model\n");
732                 ret = -ENXIO;
733                 goto fail;
734         }
735
736         /* Exclude VG cores with FE2.0 */
737         if (gpu->identity.features & chipFeatures_PIPE_VG &&
738             gpu->identity.features & chipFeatures_FE20) {
739                 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
740                 ret = -ENXIO;
741                 goto fail;
742         }
743
744         /*
745          * On cores that support security features, we claim control over the
746          * security state.
747          */
748         if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
749             (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
750                 gpu->sec_mode = ETNA_SEC_KERNEL;
751
752         ret = etnaviv_hw_reset(gpu);
753         if (ret) {
754                 dev_err(gpu->dev, "GPU reset failed\n");
755                 goto fail;
756         }
757
758         ret = etnaviv_iommu_global_init(gpu);
759         if (ret)
760                 goto fail;
761
762         /*
763          * Set the GPU linear window to be at the end of the DMA window, where
764          * the CMA area is likely to reside. This ensures that we are able to
765          * map the command buffers while having the linear window overlap as
766          * much RAM as possible, so we can optimize mappings for other buffers.
767          *
768          * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
769          * to different views of the memory on the individual engines.
770          */
771         if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
772             (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
773                 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
774                 if (dma_mask < PHYS_OFFSET + SZ_2G)
775                         priv->mmu_global->memory_base = PHYS_OFFSET;
776                 else
777                         priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
778         } else if (PHYS_OFFSET >= SZ_2G) {
779                 dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
780                 priv->mmu_global->memory_base = PHYS_OFFSET;
781                 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
782         }
783
784         /* Create buffer: */
785         ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
786                                   PAGE_SIZE);
787         if (ret) {
788                 dev_err(gpu->dev, "could not create command buffer\n");
789                 goto fail;
790         }
791
792         /* Setup event management */
793         spin_lock_init(&gpu->event_spinlock);
794         init_completion(&gpu->event_free);
795         bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
796         for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
797                 complete(&gpu->event_free);
798
799         /* Now program the hardware */
800         mutex_lock(&gpu->lock);
801         etnaviv_gpu_hw_init(gpu);
802         gpu->exec_state = -1;
803         mutex_unlock(&gpu->lock);
804
805         pm_runtime_mark_last_busy(gpu->dev);
806         pm_runtime_put_autosuspend(gpu->dev);
807
808         gpu->initialized = true;
809
810         return 0;
811
812 fail:
813         pm_runtime_mark_last_busy(gpu->dev);
814         pm_runtime_put_autosuspend(gpu->dev);
815
816         return ret;
817 }
818
819 #ifdef CONFIG_DEBUG_FS
820 struct dma_debug {
821         u32 address[2];
822         u32 state[2];
823 };
824
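/*
 * Sample the FE DMA address and state twice, polling up to 500 times, so
 * the debugfs output below can tell whether the front end is still making
 * progress or appears to be stuck at a single address.
 */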
825 static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
826 {
827         u32 i;
828
829         debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
830         debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
831
832         for (i = 0; i < 500; i++) {
833                 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
834                 debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
835
836                 if (debug->address[0] != debug->address[1])
837                         break;
838
839                 if (debug->state[0] != debug->state[1])
840                         break;
841         }
842 }
843
844 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
845 {
846         struct dma_debug debug;
847         u32 dma_lo, dma_hi, axi, idle;
848         int ret;
849
850         seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
851
852         ret = pm_runtime_get_sync(gpu->dev);
853         if (ret < 0)
854                 return ret;
855
856         dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
857         dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
858         axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
859         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
860
861         verify_dma(gpu, &debug);
862
863         seq_puts(m, "\tidentity\n");
864         seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
865         seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
866         seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
867         seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
868         seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
869
870         seq_puts(m, "\tfeatures\n");
871         seq_printf(m, "\t major_features: 0x%08x\n",
872                    gpu->identity.features);
873         seq_printf(m, "\t minor_features0: 0x%08x\n",
874                    gpu->identity.minor_features0);
875         seq_printf(m, "\t minor_features1: 0x%08x\n",
876                    gpu->identity.minor_features1);
877         seq_printf(m, "\t minor_features2: 0x%08x\n",
878                    gpu->identity.minor_features2);
879         seq_printf(m, "\t minor_features3: 0x%08x\n",
880                    gpu->identity.minor_features3);
881         seq_printf(m, "\t minor_features4: 0x%08x\n",
882                    gpu->identity.minor_features4);
883         seq_printf(m, "\t minor_features5: 0x%08x\n",
884                    gpu->identity.minor_features5);
885         seq_printf(m, "\t minor_features6: 0x%08x\n",
886                    gpu->identity.minor_features6);
887         seq_printf(m, "\t minor_features7: 0x%08x\n",
888                    gpu->identity.minor_features7);
889         seq_printf(m, "\t minor_features8: 0x%08x\n",
890                    gpu->identity.minor_features8);
891         seq_printf(m, "\t minor_features9: 0x%08x\n",
892                    gpu->identity.minor_features9);
893         seq_printf(m, "\t minor_features10: 0x%08x\n",
894                    gpu->identity.minor_features10);
895         seq_printf(m, "\t minor_features11: 0x%08x\n",
896                    gpu->identity.minor_features11);
897
898         seq_puts(m, "\tspecs\n");
899         seq_printf(m, "\t stream_count:  %d\n",
900                         gpu->identity.stream_count);
901         seq_printf(m, "\t register_max: %d\n",
902                         gpu->identity.register_max);
903         seq_printf(m, "\t thread_count: %d\n",
904                         gpu->identity.thread_count);
905         seq_printf(m, "\t vertex_cache_size: %d\n",
906                         gpu->identity.vertex_cache_size);
907         seq_printf(m, "\t shader_core_count: %d\n",
908                         gpu->identity.shader_core_count);
909         seq_printf(m, "\t pixel_pipes: %d\n",
910                         gpu->identity.pixel_pipes);
911         seq_printf(m, "\t vertex_output_buffer_size: %d\n",
912                         gpu->identity.vertex_output_buffer_size);
913         seq_printf(m, "\t buffer_size: %d\n",
914                         gpu->identity.buffer_size);
915         seq_printf(m, "\t instruction_count: %d\n",
916                         gpu->identity.instruction_count);
917         seq_printf(m, "\t num_constants: %d\n",
918                         gpu->identity.num_constants);
919         seq_printf(m, "\t varyings_count: %d\n",
920                         gpu->identity.varyings_count);
921
922         seq_printf(m, "\taxi: 0x%08x\n", axi);
923         seq_printf(m, "\tidle: 0x%08x\n", idle);
924         idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
925         if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
926                 seq_puts(m, "\t FE is not idle\n");
927         if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
928                 seq_puts(m, "\t DE is not idle\n");
929         if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
930                 seq_puts(m, "\t PE is not idle\n");
931         if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
932                 seq_puts(m, "\t SH is not idle\n");
933         if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
934                 seq_puts(m, "\t PA is not idle\n");
935         if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
936                 seq_puts(m, "\t SE is not idle\n");
937         if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
938                 seq_puts(m, "\t RA is not idle\n");
939         if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
940                 seq_puts(m, "\t TX is not idle\n");
941         if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
942                 seq_puts(m, "\t VG is not idle\n");
943         if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
944                 seq_puts(m, "\t IM is not idle\n");
945         if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
946                 seq_puts(m, "\t FP is not idle\n");
947         if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
948                 seq_puts(m, "\t TS is not idle\n");
949         if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
950                 seq_puts(m, "\t AXI low power mode\n");
951
952         if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
953                 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
954                 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
955                 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
956
957                 seq_puts(m, "\tMC\n");
958                 seq_printf(m, "\t read0: 0x%08x\n", read0);
959                 seq_printf(m, "\t read1: 0x%08x\n", read1);
960                 seq_printf(m, "\t write: 0x%08x\n", write);
961         }
962
963         seq_puts(m, "\tDMA ");
964
965         if (debug.address[0] == debug.address[1] &&
966             debug.state[0] == debug.state[1]) {
967                 seq_puts(m, "seems to be stuck\n");
968         } else if (debug.address[0] == debug.address[1]) {
969                 seq_puts(m, "address is constant\n");
970         } else {
971                 seq_puts(m, "is running\n");
972         }
973
974         seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
975         seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
976         seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
977         seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
978         seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
979                    dma_lo, dma_hi);
980
981         ret = 0;
982
983         pm_runtime_mark_last_busy(gpu->dev);
984         pm_runtime_put_autosuspend(gpu->dev);
985
986         return ret;
987 }
988 #endif
989
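/*
 * Reset and reinitialize a hung GPU.  This is invoked from the scheduler's
 * job timeout handling; all still-pending events are completed by hand
 * here, as the hardware will never signal them after the reset.
 */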
990 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
991 {
992         unsigned int i = 0;
993
994         dev_err(gpu->dev, "recover hung GPU!\n");
995
996         if (pm_runtime_get_sync(gpu->dev) < 0)
997                 return;
998
999         mutex_lock(&gpu->lock);
1000
1001         etnaviv_hw_reset(gpu);
1002
1003         /* complete all events, the GPU won't do it after the reset */
1004         spin_lock(&gpu->event_spinlock);
1005         for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
1006                 complete(&gpu->event_free);
1007         bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
1008         spin_unlock(&gpu->event_spinlock);
1009
1010         etnaviv_gpu_hw_init(gpu);
1011         gpu->exec_state = -1;
1012         gpu->mmu_context = NULL;
1013
1014         mutex_unlock(&gpu->lock);
1015         pm_runtime_mark_last_busy(gpu->dev);
1016         pm_runtime_put_autosuspend(gpu->dev);
1017 }
1018
1019 /* fence object management */
1020 struct etnaviv_fence {
1021         struct etnaviv_gpu *gpu;
1022         struct dma_fence base;
1023 };
1024
1025 static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
1026 {
1027         return container_of(fence, struct etnaviv_fence, base);
1028 }
1029
1030 static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
1031 {
1032         return "etnaviv";
1033 }
1034
1035 static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
1036 {
1037         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1038
1039         return dev_name(f->gpu->dev);
1040 }
1041
1042 static bool etnaviv_fence_signaled(struct dma_fence *fence)
1043 {
1044         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1045
1046         return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
1047 }
1048
1049 static void etnaviv_fence_release(struct dma_fence *fence)
1050 {
1051         struct etnaviv_fence *f = to_etnaviv_fence(fence);
1052
1053         kfree_rcu(f, base.rcu);
1054 }
1055
1056 static const struct dma_fence_ops etnaviv_fence_ops = {
1057         .get_driver_name = etnaviv_fence_get_driver_name,
1058         .get_timeline_name = etnaviv_fence_get_timeline_name,
1059         .signaled = etnaviv_fence_signaled,
1060         .release = etnaviv_fence_release,
1061 };
1062
1063 static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1064 {
1065         struct etnaviv_fence *f;
1066
1067         /*
1068          * GPU lock must already be held, otherwise fence completion order might
1069          * not match the seqno order assigned here.
1070          */
1071         lockdep_assert_held(&gpu->lock);
1072
1073         f = kzalloc(sizeof(*f), GFP_KERNEL);
1074         if (!f)
1075                 return NULL;
1076
1077         f->gpu = gpu;
1078
1079         dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1080                        gpu->fence_context, ++gpu->next_fence);
1081
1082         return &f->base;
1083 }
1084
1085 /* returns true if fence a comes after fence b */
1086 static inline bool fence_after(u32 a, u32 b)
1087 {
1088         return (s32)(a - b) > 0;
1089 }
1090
1091 /*
1092  * event management:
1093  */
1094
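/*
 * gpu->event_free acts as a counting semaphore: it is completed once per
 * event slot at init time, event_alloc() consumes one completion per
 * requested event (with a timeout), and event_free() returns one when a
 * slot is released.  gpu->event_bitmap tracks which slots are in use.
 */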
1095 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1096         unsigned int *events)
1097 {
1098         unsigned long timeout = msecs_to_jiffies(10 * 10000);
1099         unsigned i, acquired = 0;
1100
1101         for (i = 0; i < nr_events; i++) {
1102                 unsigned long ret;
1103
1104                 ret = wait_for_completion_timeout(&gpu->event_free, timeout);
1105
1106                 if (!ret) {
1107                         dev_err(gpu->dev, "wait_for_completion_timeout failed");
1108                         goto out;
1109                 }
1110
1111                 acquired++;
1112                 timeout = ret;
1113         }
1114
1115         spin_lock(&gpu->event_spinlock);
1116
1117         for (i = 0; i < nr_events; i++) {
1118                 int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1119
1120                 events[i] = event;
1121                 memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1122                 set_bit(event, gpu->event_bitmap);
1123         }
1124
1125         spin_unlock(&gpu->event_spinlock);
1126
1127         return 0;
1128
1129 out:
1130         for (i = 0; i < acquired; i++)
1131                 complete(&gpu->event_free);
1132
1133         return -EBUSY;
1134 }
1135
1136 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1137 {
1138         if (!test_bit(event, gpu->event_bitmap)) {
1139                 dev_warn(gpu->dev, "event %u is already marked as free",
1140                          event);
1141         } else {
1142                 clear_bit(event, gpu->event_bitmap);
1143                 complete(&gpu->event_free);
1144         }
1145 }
1146
1147 /*
1148  * Cmdstream submission/retirement:
1149  */
1150 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1151         u32 id, struct drm_etnaviv_timespec *timeout)
1152 {
1153         struct dma_fence *fence;
1154         int ret;
1155
1156         /*
1157          * Look up the fence and take a reference. We might still find a fence
1158          * whose refcount has already dropped to zero. dma_fence_get_rcu
1159          * pretends we didn't find a fence in that case.
1160          */
1161         rcu_read_lock();
1162         fence = idr_find(&gpu->fence_idr, id);
1163         if (fence)
1164                 fence = dma_fence_get_rcu(fence);
1165         rcu_read_unlock();
1166
1167         if (!fence)
1168                 return 0;
1169
1170         if (!timeout) {
1171                 /* No timeout was requested: just test for completion */
1172                 ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
1173         } else {
1174                 unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1175
1176                 ret = dma_fence_wait_timeout(fence, true, remaining);
1177                 if (ret == 0)
1178                         ret = -ETIMEDOUT;
1179                 else if (ret != -ERESTARTSYS)
1180                         ret = 0;
1181
1182         }
1183
1184         dma_fence_put(fence);
1185         return ret;
1186 }
1187
1188 /*
1189  * Wait for an object to become inactive.  This, on its own, is not race
1190  * free: the object is moved by the scheduler off the active list, and
1191  * then the iova is put.  Moreover, the object could be re-submitted just
1192  * after we notice that it's become inactive.
1193  *
1194  * Although the retirement happens under the gpu lock, we don't want to hold
1195  * that lock in this function while waiting.
1196  */
1197 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1198         struct etnaviv_gem_object *etnaviv_obj,
1199         struct drm_etnaviv_timespec *timeout)
1200 {
1201         unsigned long remaining;
1202         long ret;
1203
1204         if (!timeout)
1205                 return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1206
1207         remaining = etnaviv_timeout_to_jiffies(timeout);
1208
1209         ret = wait_event_interruptible_timeout(gpu->fence_event,
1210                                                !is_active(etnaviv_obj),
1211                                                remaining);
1212         if (ret > 0)
1213                 return 0;
1214         else if (ret == -ERESTARTSYS)
1215                 return -ERESTARTSYS;
1216         else
1217                 return -ETIMEDOUT;
1218 }
1219
1220 static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1221         struct etnaviv_event *event, unsigned int flags)
1222 {
1223         const struct etnaviv_gem_submit *submit = event->submit;
1224         unsigned int i;
1225
1226         for (i = 0; i < submit->nr_pmrs; i++) {
1227                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1228
1229                 if (pmr->flags == flags)
1230                         etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
1231         }
1232 }
1233
1234 static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1235         struct etnaviv_event *event)
1236 {
1237         u32 val;
1238
1239         /* disable clock gating */
1240         val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1241         val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1242         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1243
1244         /* enable debug register */
1245         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1246         val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1247         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1248
1249         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1250 }
1251
1252 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1253         struct etnaviv_event *event)
1254 {
1255         const struct etnaviv_gem_submit *submit = event->submit;
1256         unsigned int i;
1257         u32 val;
1258
1259         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1260
1261         for (i = 0; i < submit->nr_pmrs; i++) {
1262                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
1263
1264                 *pmr->bo_vma = pmr->sequence;
1265         }
1266
1267         /* disable debug register */
1268         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1269         val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
1270         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1271
1272         /* enable clock gating */
1273         val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1274         val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
1275         gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1276 }
1277
1278
1279 /* add bo's to gpu's ring, and kick gpu: */
1280 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1281 {
1282         struct etnaviv_gpu *gpu = submit->gpu;
1283         struct dma_fence *gpu_fence;
1284         unsigned int i, nr_events = 1, event[3];
1285         int ret;
1286
1287         if (!submit->runtime_resumed) {
1288                 ret = pm_runtime_get_sync(gpu->dev);
1289                 if (ret < 0)
1290                         return NULL;
1291                 submit->runtime_resumed = true;
1292         }
1293
1294         /*
1295          * If there are performance monitor requests we need two extra sync points:
1296          * - one to re-configure the GPU and process the ETNA_PM_PROCESS_PRE
1297          *   requests,
1298          * - one to re-configure the GPU, process the ETNA_PM_PROCESS_POST requests
1299          *   and update the sequence number for userspace.
1300          */
1301         if (submit->nr_pmrs)
1302                 nr_events = 3;
1303
1304         ret = event_alloc(gpu, nr_events, event);
1305         if (ret) {
1306                 DRM_ERROR("no free events\n");
1307                 return NULL;
1308         }
1309
1310         mutex_lock(&gpu->lock);
1311
1312         gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1313         if (!gpu_fence) {
1314                 for (i = 0; i < nr_events; i++)
1315                         event_free(gpu, event[i]);
1316
1317                 goto out_unlock;
1318         }
1319
1320         if (!gpu->mmu_context) {
1321                 etnaviv_iommu_context_get(submit->mmu_context);
1322                 gpu->mmu_context = submit->mmu_context;
1323                 etnaviv_gpu_start_fe_idleloop(gpu);
1324         } else {
1325                 etnaviv_iommu_context_get(gpu->mmu_context);
1326                 submit->prev_mmu_context = gpu->mmu_context;
1327         }
1328
1329         if (submit->nr_pmrs) {
1330                 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1331                 kref_get(&submit->refcount);
1332                 gpu->event[event[1]].submit = submit;
1333                 etnaviv_sync_point_queue(gpu, event[1]);
1334         }
1335
1336         gpu->event[event[0]].fence = gpu_fence;
1337         submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
1338         etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
1339                              event[0], &submit->cmdbuf);
1340
1341         if (submit->nr_pmrs) {
1342                 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1343                 kref_get(&submit->refcount);
1344                 gpu->event[event[2]].submit = submit;
1345                 etnaviv_sync_point_queue(gpu, event[2]);
1346         }
1347
1348 out_unlock:
1349         mutex_unlock(&gpu->lock);
1350
1351         return gpu_fence;
1352 }
1353
1354 static void sync_point_worker(struct work_struct *work)
1355 {
1356         struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1357                                                sync_point_work);
1358         struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1359         u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1360
1361         event->sync_point(gpu, event);
1362         etnaviv_submit_put(event->submit);
1363         event_free(gpu, gpu->sync_point_event);
1364
1365         /* restart FE last to avoid GPU and IRQ racing against this worker */
1366         etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1367 }
1368
1369 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
1370 {
1371         u32 status_reg, status;
1372         int i;
1373
1374         if (gpu->sec_mode == ETNA_SEC_NONE)
1375                 status_reg = VIVS_MMUv2_STATUS;
1376         else
1377                 status_reg = VIVS_MMUv2_SEC_STATUS;
1378
1379         status = gpu_read(gpu, status_reg);
1380         dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
1381
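        /* the status register packs one 4-bit exception field per MMU */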
1382         for (i = 0; i < 4; i++) {
1383                 u32 address_reg;
1384
1385                 if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
1386                         continue;
1387
1388                 if (gpu->sec_mode == ETNA_SEC_NONE)
1389                         address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
1390                 else
1391                         address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
1392
1393                 dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
1394                                     gpu_read(gpu, address_reg));
1395         }
1396 }
1397
1398 static irqreturn_t irq_handler(int irq, void *data)
1399 {
1400         struct etnaviv_gpu *gpu = data;
1401         irqreturn_t ret = IRQ_NONE;
1402
1403         u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1404
1405         if (intr != 0) {
1406                 int event;
1407
1408                 pm_runtime_mark_last_busy(gpu->dev);
1409
1410                 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1411
1412                 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1413                         dev_err(gpu->dev, "AXI bus error\n");
1414                         intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1415                 }
1416
1417                 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
1418                         dump_mmu_fault(gpu);
1419                         intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
1420                 }
1421
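                /*
                 * Walk the remaining event bits lowest-first; ffs()
                 * returns a 1-based bit index, hence the decrement.
                 */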
1422                 while ((event = ffs(intr)) != 0) {
1423                         struct dma_fence *fence;
1424
1425                         event -= 1;
1426
1427                         intr &= ~(1 << event);
1428
1429                         dev_dbg(gpu->dev, "event %u\n", event);
1430
1431                         if (gpu->event[event].sync_point) {
1432                                 gpu->sync_point_event = event;
1433                                 queue_work(gpu->wq, &gpu->sync_point_work);
1434                         }
1435
1436                         fence = gpu->event[event].fence;
1437                         if (!fence)
1438                                 continue;
1439
1440                         gpu->event[event].fence = NULL;
1441
1442                         /*
1443                          * Events can be processed out of order.  E.g.:
1444                          * - allocate and queue event 0
1445                          * - allocate event 1
1446                          * - event 0 completes, we process it
1447                          * - allocate and queue event 0
1448                          * - event 1 and event 0 complete
1449                          * so we can end up processing the second event 0 before event 1.
1450                          */
1451                         if (fence_after(fence->seqno, gpu->completed_fence))
1452                                 gpu->completed_fence = fence->seqno;
1453                         dma_fence_signal(fence);
1454
1455                         event_free(gpu, event);
1456                 }
1457
1458                 ret = IRQ_HANDLED;
1459         }
1460
1461         return ret;
1462 }
1463
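/*
 * Enable the clocks in dependency order (reg, bus, core, shader),
 * undoing whatever was already enabled if a later one fails.
 */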
1464 static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1465 {
1466         int ret;
1467
1468         if (gpu->clk_reg) {
1469                 ret = clk_prepare_enable(gpu->clk_reg);
1470                 if (ret)
1471                         return ret;
1472         }
1473
1474         if (gpu->clk_bus) {
1475                 ret = clk_prepare_enable(gpu->clk_bus);
1476                 if (ret)
1477                         goto disable_clk_reg;
1478         }
1479
1480         if (gpu->clk_core) {
1481                 ret = clk_prepare_enable(gpu->clk_core);
1482                 if (ret)
1483                         goto disable_clk_bus;
1484         }
1485
1486         if (gpu->clk_shader) {
1487                 ret = clk_prepare_enable(gpu->clk_shader);
1488                 if (ret)
1489                         goto disable_clk_core;
1490         }
1491
1492         return 0;
1493
1494 disable_clk_core:
1495         if (gpu->clk_core)
1496                 clk_disable_unprepare(gpu->clk_core);
1497 disable_clk_bus:
1498         if (gpu->clk_bus)
1499                 clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
        if (gpu->clk_reg)
                clk_disable_unprepare(gpu->clk_reg);
1500
1501         return ret;
1502 }
1503
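/* Disable the clocks in the reverse order of etnaviv_gpu_clk_enable(). */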
1504 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1505 {
1506         if (gpu->clk_shader)
1507                 clk_disable_unprepare(gpu->clk_shader);
1508         if (gpu->clk_core)
1509                 clk_disable_unprepare(gpu->clk_core);
1510         if (gpu->clk_bus)
1511                 clk_disable_unprepare(gpu->clk_bus);
1512         if (gpu->clk_reg)
1513                 clk_disable_unprepare(gpu->clk_reg);
1514
1515         return 0;
1516 }
1517
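/*
 * Poll the idle register until every unit in gpu->idle_mask reports idle,
 * or give up after timeout_ms milliseconds.
 */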
1518 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1519 {
1520         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
1521
1522         do {
1523                 u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1524
1525                 if ((idle & gpu->idle_mask) == gpu->idle_mask)
1526                         return 0;
1527
1528                 if (time_is_before_jiffies(timeout)) {
1529                         dev_warn(gpu->dev,
1530                                  "timed out waiting for idle: idle=0x%x\n",
1531                                  idle);
1532                         return -ETIMEDOUT;
1533                 }
1534
1535                 udelay(5);
1536         } while (1);
1537 }
1538
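/*
 * Park the front end by replacing the tail WAIT with an END, wait for the
 * hardware to drain, drop the MMU context and gate the clocks.
 */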
1539 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1540 {
1541         if (gpu->initialized && gpu->mmu_context) {
1542                 /* Replace the last WAIT with END */
1543                 mutex_lock(&gpu->lock);
1544                 etnaviv_buffer_end(gpu);
1545                 mutex_unlock(&gpu->lock);
1546
1547                 /*
1548                  * We know that only the FE is busy here, so this should
1549                  * happen quickly (as the WAIT is only 200 cycles).  If
1550                  * we fail, just warn and continue.
1551                  */
1552                 etnaviv_gpu_wait_idle(gpu, 100);
1553
1554                 etnaviv_iommu_context_put(gpu->mmu_context);
1555                 gpu->mmu_context = NULL;
1556         }
1557
1558         gpu->exec_state = -1;
1559
1560         return etnaviv_gpu_clk_disable(gpu);
1561 }
1562
1563 #ifdef CONFIG_PM
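/* Re-program the clock and redo the hardware init under the GPU lock. */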
1564 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1565 {
1566         int ret;
1567
1568         ret = mutex_lock_killable(&gpu->lock);
1569         if (ret)
1570                 return ret;
1571
1572         etnaviv_gpu_update_clock(gpu);
1573         etnaviv_gpu_hw_init(gpu);
1574
1575         mutex_unlock(&gpu->lock);
1576
1577         return 0;
1578 }
1579 #endif
1580
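/*
 * Thermal cooling device: state 0 means no throttling, state 6 the
 * deepest throttle.  The requested state is stored in gpu->freq_scale
 * and applied by etnaviv_gpu_update_clock() while the GPU is powered.
 */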
1581 static int
1582 etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
1583                                   unsigned long *state)
1584 {
1585         *state = 6;
1586
1587         return 0;
1588 }
1589
1590 static int
1591 etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
1592                                   unsigned long *state)
1593 {
1594         struct etnaviv_gpu *gpu = cdev->devdata;
1595
1596         *state = gpu->freq_scale;
1597
1598         return 0;
1599 }
1600
1601 static int
1602 etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
1603                                   unsigned long state)
1604 {
1605         struct etnaviv_gpu *gpu = cdev->devdata;
1606
1607         mutex_lock(&gpu->lock);
1608         gpu->freq_scale = state;
1609         if (!pm_runtime_suspended(gpu->dev))
1610                 etnaviv_gpu_update_clock(gpu);
1611         mutex_unlock(&gpu->lock);
1612
1613         return 0;
1614 }
1615
1616 static const struct thermal_cooling_device_ops cooling_ops = {
1617         .get_max_state = etnaviv_gpu_cooling_get_max_state,
1618         .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
1619         .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
1620 };
1621
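/*
 * Component bind: register the optional cooling device, create the
 * per-GPU ordered workqueue, set up the scheduler and fence state,
 * and add the GPU to the DRM device.
 */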
1622 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1623         void *data)
1624 {
1625         struct drm_device *drm = data;
1626         struct etnaviv_drm_private *priv = drm->dev_private;
1627         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1628         int ret;
1629
1630         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
1631                 gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1632                                 (char *)dev_name(dev), gpu, &cooling_ops);
1633                 if (IS_ERR(gpu->cooling))
1634                         return PTR_ERR(gpu->cooling);
1635         }
1636
1637         gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1638         if (!gpu->wq) {
1639                 ret = -ENOMEM;
1640                 goto out_thermal;
1641         }
1642
1643         ret = etnaviv_sched_init(gpu);
1644         if (ret)
1645                 goto out_workqueue;
1646
1647 #ifdef CONFIG_PM
1648         ret = pm_runtime_get_sync(gpu->dev);
1649 #else
1650         ret = etnaviv_gpu_clk_enable(gpu);
1651 #endif
1652         if (ret < 0)
1653                 goto out_sched;
1654
1656         gpu->drm = drm;
1657         gpu->fence_context = dma_fence_context_alloc(1);
1658         idr_init(&gpu->fence_idr);
1659         spin_lock_init(&gpu->fence_spinlock);
1660
1661         INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1662         init_waitqueue_head(&gpu->fence_event);
1663
1664         priv->gpu[priv->num_gpus++] = gpu;
1665
1666         pm_runtime_mark_last_busy(gpu->dev);
1667         pm_runtime_put_autosuspend(gpu->dev);
1668
1669         return 0;
1670
1671 out_sched:
1672         etnaviv_sched_fini(gpu);
1673
1674 out_workqueue:
1675         destroy_workqueue(gpu->wq);
1676
1677 out_thermal:
1678         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1679                 thermal_cooling_device_unregister(gpu->cooling);
1680
1681         return ret;
1682 }
1683
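/*
 * Component unbind: tear down in reverse order of bind, suspending the
 * hardware before the kernel command buffer and MMU state are freed.
 */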
1684 static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1685         void *data)
1686 {
1687         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1688
1689         DBG("%s", dev_name(gpu->dev));
1690
1691         flush_workqueue(gpu->wq);
1692         destroy_workqueue(gpu->wq);
1693
1694         etnaviv_sched_fini(gpu);
1695
1696 #ifdef CONFIG_PM
1697         pm_runtime_get_sync(gpu->dev);
1698         pm_runtime_put_sync_suspend(gpu->dev);
1699 #else
1700         etnaviv_gpu_hw_suspend(gpu);
1701 #endif
1702
1703         if (gpu->initialized) {
1704                 etnaviv_cmdbuf_free(&gpu->buffer);
1705                 etnaviv_iommu_global_fini(gpu);
1706                 gpu->initialized = false;
1707         }
1708
1709         gpu->drm = NULL;
1710         idr_destroy(&gpu->fence_idr);
1711
1712         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
1713                 thermal_cooling_device_unregister(gpu->cooling);
1714         gpu->cooling = NULL;
1715 }
1716
1717 static const struct component_ops gpu_ops = {
1718         .bind = etnaviv_gpu_bind,
1719         .unbind = etnaviv_gpu_unbind,
1720 };
1721
1722 static const struct of_device_id etnaviv_gpu_match[] = {
1723         {
1724                 .compatible = "vivante,gc"
1725         },
1726         { /* sentinel */ }
1727 };
1728 MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
1729
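/*
 * Platform probe: map the register window, request the interrupt, look up
 * the optional reg/bus/core/shader clocks, enable runtime PM and register
 * with the component framework.
 */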
1730 static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1731 {
1732         struct device *dev = &pdev->dev;
1733         struct etnaviv_gpu *gpu;
1734         int err;
1735
1736         gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1737         if (!gpu)
1738                 return -ENOMEM;
1739
1740         gpu->dev = &pdev->dev;
1741         mutex_init(&gpu->lock);
1742         mutex_init(&gpu->fence_lock);
1743
1744         /* Map registers: */
1745         gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
1746         if (IS_ERR(gpu->mmio))
1747                 return PTR_ERR(gpu->mmio);
1748
1749         /* Get Interrupt: */
1750         gpu->irq = platform_get_irq(pdev, 0);
1751         if (gpu->irq < 0) {
1752                 dev_err(dev, "failed to get irq: %d\n", gpu->irq);
1753                 return gpu->irq;
1754         }
1755
1756         err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1757                                dev_name(gpu->dev), gpu);
1758         if (err) {
1759                 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1760                 return err;
1761         }
1762
1763         /* Get Clocks: */
1764         gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
1765         DBG("clk_reg: %p", gpu->clk_reg);
1766         if (IS_ERR(gpu->clk_reg))
1767                 gpu->clk_reg = NULL;
1768
1769         gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1770         DBG("clk_bus: %p", gpu->clk_bus);
1771         if (IS_ERR(gpu->clk_bus))
1772                 gpu->clk_bus = NULL;
1773
1774         gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1775         DBG("clk_core: %p", gpu->clk_core);
1776         if (IS_ERR(gpu->clk_core))
1777                 gpu->clk_core = NULL;
1778         gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1779
1780         gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1781         DBG("clk_shader: %p", gpu->clk_shader);
1782         if (IS_ERR(gpu->clk_shader))
1783                 gpu->clk_shader = NULL;
1784         gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1785
1786         /* TODO: figure out max mapped size */
1787         dev_set_drvdata(dev, gpu);
1788
1789         /*
1790          * We treat the device as initially suspended.  The runtime PM
1791          * autosuspend delay is rather arbitrary: no measurements have
1792          * yet been performed to determine an appropriate value.
1793          */
1794         pm_runtime_use_autosuspend(gpu->dev);
1795         pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1796         pm_runtime_enable(gpu->dev);
1797
1798         err = component_add(&pdev->dev, &gpu_ops);
1799         if (err < 0) {
1800                 dev_err(&pdev->dev, "failed to register component: %d\n", err);
1801                 return err;
1802         }
1803
1804         return 0;
1805 }
1806
1807 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1808 {
1809         component_del(&pdev->dev, &gpu_ops);
1810         pm_runtime_disable(&pdev->dev);
1811         return 0;
1812 }
1813
1814 #ifdef CONFIG_PM
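/*
 * Runtime suspend: refuse while jobs are still queued or while any unit
 * other than the FE is busy (the FE never reports idle while it executes
 * the WAIT loop).
 */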
1815 static int etnaviv_gpu_rpm_suspend(struct device *dev)
1816 {
1817         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1818         u32 idle, mask;
1819
1820         /* If there are any jobs in the HW queue, we're not idle */
1821         if (atomic_read(&gpu->sched.hw_rq_count))
1822                 return -EBUSY;
1823
1824         /* Check whether the hardware (except FE) is idle */
1825         mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1826         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1827         if (idle != mask)
1828                 return -EBUSY;
1829
1830         return etnaviv_gpu_hw_suspend(gpu);
1831 }
1832
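/*
 * Runtime resume: ungate the clocks and, if the GPU had been brought up
 * before, restore the hardware state.
 */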
1833 static int etnaviv_gpu_rpm_resume(struct device *dev)
1834 {
1835         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1836         int ret;
1837
1838         ret = etnaviv_gpu_clk_enable(gpu);
1839         if (ret)
1840                 return ret;
1841
1842         /* Re-initialise the basic hardware state */
1843         if (gpu->drm && gpu->initialized) {
1844                 ret = etnaviv_gpu_hw_resume(gpu);
1845                 if (ret) {
1846                         etnaviv_gpu_clk_disable(gpu);
1847                         return ret;
1848                 }
1849         }
1850
1851         return 0;
1852 }
1853 #endif
1854
1855 static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1856         SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1857                            NULL)
1858 };
1859
1860 struct platform_driver etnaviv_gpu_driver = {
1861         .driver = {
1862                 .name = "etnaviv-gpu",
1863                 .owner = THIS_MODULE,
1864                 .pm = &etnaviv_gpu_pm_ops,
1865                 .of_match_table = etnaviv_gpu_match,
1866         },
1867         .probe = etnaviv_gpu_platform_probe,
1868         .remove = etnaviv_gpu_platform_remove,
1869         .id_table = gpu_ids,
1870 };