// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

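/*
 * GET_PARAM ioctl: return one value from the GPU feature/property set cached
 * in pfdev->features.
 */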
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
        struct drm_panfrost_get_param *param = data;
        struct panfrost_device *pfdev = ddev->dev_private;

        if (param->pad != 0)
                return -EINVAL;

#define PANFROST_FEATURE(name, member)                  \
        case DRM_PANFROST_PARAM_ ## name:               \
                param->value = pfdev->features.member;  \
                break
#define PANFROST_FEATURE_ARRAY(name, member, max)                       \
        case DRM_PANFROST_PARAM_ ## name ## 0 ...                       \
                DRM_PANFROST_PARAM_ ## name ## max:                     \
                param->value = pfdev->features.member[param->param -    \
                        DRM_PANFROST_PARAM_ ## name ## 0];              \
                break

        switch (param->param) {
                PANFROST_FEATURE(GPU_PROD_ID, id);
                PANFROST_FEATURE(GPU_REVISION, revision);
                PANFROST_FEATURE(SHADER_PRESENT, shader_present);
                PANFROST_FEATURE(TILER_PRESENT, tiler_present);
                PANFROST_FEATURE(L2_PRESENT, l2_present);
                PANFROST_FEATURE(STACK_PRESENT, stack_present);
                PANFROST_FEATURE(AS_PRESENT, as_present);
                PANFROST_FEATURE(JS_PRESENT, js_present);
                PANFROST_FEATURE(L2_FEATURES, l2_features);
                PANFROST_FEATURE(CORE_FEATURES, core_features);
                PANFROST_FEATURE(TILER_FEATURES, tiler_features);
                PANFROST_FEATURE(MEM_FEATURES, mem_features);
                PANFROST_FEATURE(MMU_FEATURES, mmu_features);
                PANFROST_FEATURE(THREAD_FEATURES, thread_features);
                PANFROST_FEATURE(MAX_THREADS, max_threads);
                PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
                                thread_max_workgroup_sz);
                PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
                                thread_max_barrier_sz);
                PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
                PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
                PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
                PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
                PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
                PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
        default:
                return -EINVAL;
        }

        return 0;
}

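/*
 * CREATE_BO ioctl: allocate a GEM object, create a handle for it and return
 * the GPU VA of its mapping in the caller's address space.
 */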
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
        struct panfrost_gem_mapping *mapping;
        int ret;

        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
                return -EINVAL;

        /* Heaps should never be executable */
        if ((args->flags & PANFROST_BO_HEAP) &&
            !(args->flags & PANFROST_BO_NOEXEC))
                return -EINVAL;

        bo = panfrost_gem_create(dev, args->size, args->flags);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
        if (ret)
                goto out;

        mapping = panfrost_gem_mapping_get(bo, priv);
        if (mapping) {
                args->offset = mapping->mmnode.start << PAGE_SHIFT;
                panfrost_gem_mapping_put(mapping);
        } else {
                /* This can only happen if the handle from
                 * drm_gem_handle_create() has already been guessed and freed
                 * by user space.
                 */
                ret = -EINVAL;
        }

out:
        drm_gem_object_put(&bo->base.base);
        return ret;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_object *bo;
        unsigned int i;
        int ret;

        job->bo_count = args->bo_handle_count;

        if (!job->bo_count)
                return 0;

        ret = drm_gem_objects_lookup(file_priv,
                                     (void __user *)(uintptr_t)args->bo_handles,
                                     job->bo_count, &job->bos);
        if (ret)
                return ret;

        job->mappings = kvmalloc_array(job->bo_count,
                                       sizeof(struct panfrost_gem_mapping *),
                                       GFP_KERNEL | __GFP_ZERO);
        if (!job->mappings)
                return -ENOMEM;

        for (i = 0; i < job->bo_count; i++) {
                struct panfrost_gem_mapping *mapping;

                bo = to_panfrost_bo(job->bos[i]);
                mapping = panfrost_gem_mapping_get(bo, priv);
                if (!mapping) {
                        ret = -EINVAL;
                        break;
                }

                atomic_inc(&bo->gpu_usecount);
                job->mappings[i] = mapping;
        }

        return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
{
        u32 *handles;
        int ret = 0;
        int i, in_fence_count;

        in_fence_count = args->in_sync_count;

        if (!in_fence_count)
                return 0;

        handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
                goto fail;
        }

        if (copy_from_user(handles,
                           (void __user *)(uintptr_t)args->in_syncs,
                           in_fence_count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in syncobj handles\n");
                goto fail;
        }

        for (i = 0; i < in_fence_count; i++) {
                struct dma_fence *fence;

                ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
                                             &fence);
                if (ret)
                        goto fail;

                ret = drm_sched_job_add_dependency(&job->base, fence);

                if (ret)
                        goto fail;
        }

fail:
        kvfree(handles);
        return ret;
}

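/*
 * SUBMIT ioctl: build a panfrost_job from the userspace arguments, resolve
 * its input syncobjs and BOs, push it to the scheduler and, if requested,
 * install the render-done fence in the output syncobj.
 */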
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_file_priv *file_priv = file->driver_priv;
        struct drm_panfrost_submit *args = data;
        struct drm_syncobj *sync_out = NULL;
        struct panfrost_job *job;
        int ret = 0, slot;

        if (!args->jc)
                return -EINVAL;

        if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
                return -EINVAL;

        if (args->out_sync > 0) {
                sync_out = drm_syncobj_find(file, args->out_sync);
                if (!sync_out)
                        return -ENODEV;
        }

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job) {
                ret = -ENOMEM;
                goto out_put_syncout;
        }

        kref_init(&job->refcount);

        job->pfdev = pfdev;
        job->jc = args->jc;
        job->requirements = args->requirements;
        job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
        job->mmu = file_priv->mmu;

        slot = panfrost_job_get_slot(job);

        ret = drm_sched_job_init(&job->base,
                                 &file_priv->sched_entity[slot],
                                 NULL);
        if (ret)
                goto out_put_job;

        ret = panfrost_copy_in_sync(dev, file, args, job);
        if (ret)
                goto out_cleanup_job;

        ret = panfrost_lookup_bos(dev, file, args, job);
        if (ret)
                goto out_cleanup_job;

        ret = panfrost_job_push(job);
        if (ret)
                goto out_cleanup_job;

        /* Update the return sync object for the job */
        if (sync_out)
                drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
        if (ret)
                drm_sched_job_cleanup(&job->base);
out_put_job:
        panfrost_job_put(job);
out_put_syncout:
        if (sync_out)
                drm_syncobj_put(sync_out);

        return ret;
}

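/*
 * WAIT_BO ioctl: wait for the fences attached to a BO's reservation object,
 * with an absolute timeout supplied by userspace.
 */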
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        long ret;
        struct drm_panfrost_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

        if (args->pad)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj)
                return -ENOENT;

        ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
                                    true, timeout);
        if (!ret)
                ret = timeout ? -ETIMEDOUT : -EBUSY;

        drm_gem_object_put(gem_obj);

        return ret;
}

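/*
 * MMAP_BO ioctl: return the fake mmap offset userspace must pass to mmap()
 * for this BO. Heap BOs are rejected because their pages are not pinned.
 */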
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_panfrost_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        /* Don't allow mmapping of heap objects as pages are not pinned. */
        if (to_panfrost_bo(gem_obj)->is_heap) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
        drm_gem_object_put(gem_obj);
        return ret;
}

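/*
 * GET_BO_OFFSET ioctl: look up the GPU VA at which a BO is mapped in the
 * caller's address space.
 */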
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
        struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_panfrost_bo(gem_obj);

        mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put(gem_obj);

        if (!mapping)
                return -EINVAL;

        args->offset = mapping->mmnode.start << PAGE_SHIFT;
        panfrost_gem_mapping_put(mapping);
        return 0;
}

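/*
 * MADVISE ioctl: mark a BO as needed or purgeable and move it on or off the
 * device's shrinker list accordingly.
 */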
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
        int ret = 0;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        bo = to_panfrost_bo(gem_obj);

        mutex_lock(&pfdev->shrinker_lock);
        mutex_lock(&bo->mappings.lock);
        if (args->madv == PANFROST_MADV_DONTNEED) {
                struct panfrost_gem_mapping *first;

                first = list_first_entry(&bo->mappings.list,
                                         struct panfrost_gem_mapping,
                                         node);

                /*
                 * If we want to mark the BO purgeable, there must be only one
                 * user: the caller FD.
                 * We could do something smarter and mark the BO purgeable only
                 * when all its users have marked it purgeable, but globally
                 * visible/shared BOs are likely to never be marked purgeable
                 * anyway, so let's not bother.
                 */
                if (!list_is_singular(&bo->mappings.list) ||
                    WARN_ON_ONCE(first->mmu != priv->mmu)) {
                        ret = -EINVAL;
                        goto out_unlock_mappings;
                }
        }

        args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);

        if (args->retained) {
                if (args->madv == PANFROST_MADV_DONTNEED)
                        list_move_tail(&bo->base.madv_list,
                                       &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }

out_unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);

        drm_gem_object_put(gem_obj);
        return ret;
}

int panfrost_unstable_ioctl_check(void)
{
        if (!unstable_ioctls)
                return -ENOSYS;

        return 0;
}

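/*
 * Per-fd open: allocate the file private data, create a dedicated MMU
 * address space and set up the per-file job submission state.
 */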
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_file_priv *panfrost_priv;

        panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
        if (!panfrost_priv)
                return -ENOMEM;

        panfrost_priv->pfdev = pfdev;
        file->driver_priv = panfrost_priv;

        panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
        if (IS_ERR(panfrost_priv->mmu)) {
                ret = PTR_ERR(panfrost_priv->mmu);
                goto err_free;
        }

        ret = panfrost_job_open(panfrost_priv);
        if (ret)
                goto err_job;

        return 0;

err_job:
        panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
        kfree(panfrost_priv);
        return ret;
}

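/*
 * Per-fd teardown: close perfcnt and job state for this file, then drop the
 * MMU context and free the private data.
 */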
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct panfrost_file_priv *panfrost_priv = file->driver_priv;

        panfrost_perfcnt_close(file);
        panfrost_job_close(panfrost_priv);

        panfrost_mmu_ctx_put(panfrost_priv->mmu);
        kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
        DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

        PANFROST_IOCTL(SUBMIT,          submit,         DRM_RENDER_ALLOW),
        PANFROST_IOCTL(WAIT_BO,         wait_bo,        DRM_RENDER_ALLOW),
        PANFROST_IOCTL(CREATE_BO,       create_bo,      DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MMAP_BO,         mmap_bo,        DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_PARAM,       get_param,      DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_BO_OFFSET,   get_bo_offset,  DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_ENABLE,  perfcnt_enable, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_DUMP,    perfcnt_dump,   DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MADVISE,         madvise,        DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 */
static const struct drm_driver panfrost_drm_driver = {
        .driver_features        = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
        .open                   = panfrost_open,
        .postclose              = panfrost_postclose,
        .ioctls                 = panfrost_drm_driver_ioctls,
        .num_ioctls             = ARRAY_SIZE(panfrost_drm_driver_ioctls),
        .fops                   = &panfrost_drm_driver_fops,
        .name                   = "panfrost",
        .desc                   = "panfrost DRM",
        .date                   = "20180908",
        .major                  = 1,
        .minor                  = 2,

        .gem_create_object      = panfrost_gem_create_object,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
        .gem_prime_mmap         = drm_gem_prime_mmap,
};

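/*
 * Platform probe: allocate the panfrost device, initialize the GPU, enable
 * runtime PM with autosuspend and register the DRM device.
 */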
static int panfrost_probe(struct platform_device *pdev)
{
        struct panfrost_device *pfdev;
        struct drm_device *ddev;
        int err;

        pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
        if (!pfdev)
                return -ENOMEM;

        pfdev->pdev = pdev;
        pfdev->dev = &pdev->dev;

        platform_set_drvdata(pdev, pfdev);

        pfdev->comp = of_device_get_match_data(&pdev->dev);
        if (!pfdev->comp)
                return -ENODEV;

        pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

        /* Allocate and initialize the DRM device. */
        ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        ddev->dev_private = pfdev;
        pfdev->ddev = ddev;

        mutex_init(&pfdev->shrinker_lock);
        INIT_LIST_HEAD(&pfdev->shrinker_list);

        err = panfrost_device_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during GPU init\n");
                goto err_out0;
        }

        pm_runtime_set_active(pfdev->dev);
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_enable(pfdev->dev);
        pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
        pm_runtime_use_autosuspend(pfdev->dev);

        /*
         * Register the DRM device with the core and expose its device nodes
         * to userspace.
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
                goto err_out1;

        panfrost_gem_shrinker_init(ddev);

        return 0;

err_out1:
        pm_runtime_disable(pfdev->dev);
        panfrost_device_fini(pfdev);
        pm_runtime_set_suspended(pfdev->dev);
err_out0:
        drm_dev_put(ddev);
        return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
        struct panfrost_device *pfdev = platform_get_drvdata(pdev);
        struct drm_device *ddev = pfdev->ddev;

        drm_dev_unregister(ddev);
        panfrost_gem_shrinker_cleanup(ddev);

        pm_runtime_get_sync(pfdev->dev);
        pm_runtime_disable(pfdev->dev);
        panfrost_device_fini(pfdev);
        pm_runtime_set_suspended(pfdev->dev);

        drm_dev_put(ddev);
        return 0;
}

/*
 * The OPP core wants the supply names to be NULL terminated, but we need the
 * correct num_supplies value for regulator core. Hence, we NULL terminate here
 * and then initialize num_supplies with ARRAY_SIZE - 1.
 */
static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
        .num_supplies = ARRAY_SIZE(default_supplies) - 1,
        .supply_names = default_supplies,
        .num_pm_domains = 1, /* optional */
        .pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
        .num_supplies = ARRAY_SIZE(default_supplies) - 1,
        .supply_names = default_supplies,
        .vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
        .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
        .supply_names = mediatek_mt8183_supplies,
        .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
        .pm_domain_names = mediatek_mt8183_pm_domains,
};

static const struct of_device_id dt_match[] = {
        /* Set first to probe before the generic compatibles */
        { .compatible = "amlogic,meson-gxm-mali",
          .data = &amlogic_data, },
        { .compatible = "amlogic,meson-g12a-mali",
          .data = &amlogic_data, },
        { .compatible = "arm,mali-t604", .data = &default_data, },
        { .compatible = "arm,mali-t624", .data = &default_data, },
        { .compatible = "arm,mali-t628", .data = &default_data, },
        { .compatible = "arm,mali-t720", .data = &default_data, },
        { .compatible = "arm,mali-t760", .data = &default_data, },
        { .compatible = "arm,mali-t820", .data = &default_data, },
        { .compatible = "arm,mali-t830", .data = &default_data, },
        { .compatible = "arm,mali-t860", .data = &default_data, },
        { .compatible = "arm,mali-t880", .data = &default_data, },
        { .compatible = "arm,mali-bifrost", .data = &default_data, },
        { .compatible = "arm,mali-valhall-jm", .data = &default_data, },
        { .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver panfrost_driver = {
        .probe          = panfrost_probe,
        .remove         = panfrost_remove,
        .driver         = {
                .name   = "panfrost",
                .pm     = pm_ptr(&panfrost_pm_ops),
                .of_match_table = dt_match,
        },
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");