// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

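/*
 * GET_PARAM: report one of the GPU feature/ID values that were read from
 * the hardware registers at probe time and cached in pfdev->features.
 */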
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
        struct drm_panfrost_get_param *param = data;
        struct panfrost_device *pfdev = ddev->dev_private;

        if (param->pad != 0)
                return -EINVAL;

#define PANFROST_FEATURE(name, member) \
        case DRM_PANFROST_PARAM_ ## name: \
                param->value = pfdev->features.member; \
                break
#define PANFROST_FEATURE_ARRAY(name, member, max) \
        case DRM_PANFROST_PARAM_ ## name ## 0 ... \
                DRM_PANFROST_PARAM_ ## name ## max: \
                param->value = pfdev->features.member[param->param - \
                        DRM_PANFROST_PARAM_ ## name ## 0]; \
                break

        switch (param->param) {
                PANFROST_FEATURE(GPU_PROD_ID, id);
                PANFROST_FEATURE(GPU_REVISION, revision);
                PANFROST_FEATURE(SHADER_PRESENT, shader_present);
                PANFROST_FEATURE(TILER_PRESENT, tiler_present);
                PANFROST_FEATURE(L2_PRESENT, l2_present);
                PANFROST_FEATURE(STACK_PRESENT, stack_present);
                PANFROST_FEATURE(AS_PRESENT, as_present);
                PANFROST_FEATURE(JS_PRESENT, js_present);
                PANFROST_FEATURE(L2_FEATURES, l2_features);
                PANFROST_FEATURE(CORE_FEATURES, core_features);
                PANFROST_FEATURE(TILER_FEATURES, tiler_features);
                PANFROST_FEATURE(MEM_FEATURES, mem_features);
                PANFROST_FEATURE(MMU_FEATURES, mmu_features);
                PANFROST_FEATURE(THREAD_FEATURES, thread_features);
                PANFROST_FEATURE(MAX_THREADS, max_threads);
                PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
                                thread_max_workgroup_sz);
                PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
                                thread_max_barrier_sz);
                PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
                PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
                PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
                PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
                PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
        default:
                return -EINVAL;
        }

        return 0;
}

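/*
 * CREATE_BO: allocate a shmem-backed GEM object, return a handle for it and
 * the GPU virtual address of its mapping in the caller's address space.
 */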
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
        struct panfrost_gem_mapping *mapping;

        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
                return -EINVAL;

        /* Heaps should never be executable */
        if ((args->flags & PANFROST_BO_HEAP) &&
            !(args->flags & PANFROST_BO_NOEXEC))
                return -EINVAL;

        bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
                                             &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        mapping = panfrost_gem_mapping_get(bo, priv);
        if (!mapping) {
                drm_gem_object_put_unlocked(&bo->base.base);
                return -EINVAL;
        }

        args->offset = mapping->mmnode.start << PAGE_SHIFT;
        panfrost_gem_mapping_put(mapping);

        return 0;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
                    struct drm_file *file_priv,
                    struct drm_panfrost_submit *args,
                    struct panfrost_job *job)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_object *bo;
        unsigned int i;
        int ret;

        job->bo_count = args->bo_handle_count;
        if (!job->bo_count)
                return 0;

        job->implicit_fences = kvmalloc_array(job->bo_count,
                                              sizeof(struct dma_fence *),
                                              GFP_KERNEL | __GFP_ZERO);
        if (!job->implicit_fences)
                return -ENOMEM;

        ret = drm_gem_objects_lookup(file_priv,
                                     (void __user *)(uintptr_t)args->bo_handles,
                                     job->bo_count, &job->bos);
        if (ret)
                return ret;

        job->mappings = kvmalloc_array(job->bo_count,
                                       sizeof(struct panfrost_gem_mapping *),
                                       GFP_KERNEL | __GFP_ZERO);
        if (!job->mappings)
                return -ENOMEM;

        for (i = 0; i < job->bo_count; i++) {
                struct panfrost_gem_mapping *mapping;

                bo = to_panfrost_bo(job->bos[i]);
                mapping = panfrost_gem_mapping_get(bo, priv);
                if (!mapping) {
                        ret = -EINVAL;
                        break;
                }

                atomic_inc(&bo->gpu_usecount);
                job->mappings[i] = mapping;
        }

        return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
                      struct drm_file *file_priv,
                      struct drm_panfrost_submit *args,
                      struct panfrost_job *job)
{
        u32 *handles;
        int ret = 0;
        int i;

        job->in_fence_count = args->in_sync_count;
        if (!job->in_fence_count)
                return 0;

        job->in_fences = kvmalloc_array(job->in_fence_count,
                                        sizeof(struct dma_fence *),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!job->in_fences) {
                DRM_DEBUG("Failed to allocate job in fences\n");
                return -ENOMEM;
        }

        handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
                goto fail;
        }

        if (copy_from_user(handles,
                           (void __user *)(uintptr_t)args->in_syncs,
                           job->in_fence_count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in syncobj handles\n");
                goto fail;
        }

        for (i = 0; i < job->in_fence_count; i++) {
                ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
                                             &job->in_fences[i]);
                if (ret == -EINVAL)
                        goto fail;
        }

fail:
        kvfree(handles);
        return ret;
}

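/*
 * SUBMIT: build a panfrost_job from the userspace arguments (job chain
 * address, requirements, in-syncs and BO handles) and push it to the
 * scheduler; the optional out-sync is set to the job's render-done fence.
 */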
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_panfrost_submit *args = data;
        struct drm_syncobj *sync_out = NULL;
        struct panfrost_job *job;
        int ret = 0;

        if (!args->jc)
                return -EINVAL;

        if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
                return -EINVAL;

        if (args->out_sync > 0) {
                sync_out = drm_syncobj_find(file, args->out_sync);
                if (!sync_out)
                        return -ENODEV;
        }

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job) {
                ret = -ENOMEM;
                goto fail_out_sync;
        }

        kref_init(&job->refcount);

        job->pfdev = pfdev;
        job->jc = args->jc;
        job->requirements = args->requirements;
        job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
        job->file_priv = file->driver_priv;

        ret = panfrost_copy_in_sync(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_lookup_bos(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_job_push(job);
        if (ret)
                goto fail_job;

        /* Update the return sync object for the job */
        if (sync_out)
                drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
        panfrost_job_put(job);
fail_out_sync:
        if (sync_out)
                drm_syncobj_put(sync_out);

        return ret;
}

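/*
 * WAIT_BO: wait, with an absolute timeout, for all fences attached to the
 * BO's reservation object to signal.
 */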
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        long ret;
        struct drm_panfrost_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

        if (args->pad)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj)
                return -ENOENT;

        ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
                                        true, timeout);
        if (!ret)
                ret = timeout ? -ETIMEDOUT : -EBUSY;

        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

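/*
 * MMAP_BO: create the fake mmap offset for a BO so userspace can map it
 * with mmap() on the DRM fd; heap BOs are refused because their backing
 * pages are not pinned.
 */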
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_panfrost_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        /* Don't allow mmapping of heap objects as pages are not pinned. */
        if (to_panfrost_bo(gem_obj)->is_heap) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
        drm_gem_object_put_unlocked(gem_obj);
        return ret;
}

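/*
 * GET_BO_OFFSET: look up the GPU virtual address at which a BO is mapped
 * in the calling file's address space.
 */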
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
        struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_panfrost_bo(gem_obj);

        mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put_unlocked(gem_obj);

        if (!mapping)
                return -EINVAL;

        args->offset = mapping->mmnode.start << PAGE_SHIFT;
        panfrost_gem_mapping_put(mapping);
        return 0;
}

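/*
 * MADVISE: mark a BO as needed or purgeable. Purgeable BOs are put on the
 * per-device shrinker list so their backing pages can be reclaimed under
 * memory pressure.
 */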
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
        int ret = 0;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        bo = to_panfrost_bo(gem_obj);

        mutex_lock(&pfdev->shrinker_lock);
        mutex_lock(&bo->mappings.lock);
        if (args->madv == PANFROST_MADV_DONTNEED) {
                struct panfrost_gem_mapping *first;

                first = list_first_entry(&bo->mappings.list,
                                         struct panfrost_gem_mapping,
                                         node);

                /*
                 * If we want to mark the BO purgeable, there must be only one
                 * user: the caller FD.
                 * We could do something smarter and mark the BO purgeable only
                 * when all its users have marked it purgeable, but globally
                 * visible/shared BOs are likely to never be marked purgeable
                 * anyway, so let's not bother.
                 */
                if (!list_is_singular(&bo->mappings.list) ||
                    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
                        ret = -EINVAL;
                        goto out_unlock_mappings;
                }
        }

        args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

        if (args->retained) {
                if (args->madv == PANFROST_MADV_DONTNEED)
                        list_add_tail(&bo->base.madv_list,
                                      &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }

out_unlock_mappings:
        mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);

        drm_gem_object_put_unlocked(gem_obj);
        return ret;
}

int panfrost_unstable_ioctl_check(void)
{
        if (!unstable_ioctls)
                return -ENOSYS;

        return 0;
}

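/*
 * Constants in units of page frame numbers, used by the color_adjust
 * callback below to keep executable mappings away from 4GB boundaries.
 */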
#define PFN_4G          (SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK     (PFN_4G - 1)
#define PFN_16M         (SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
                                         unsigned long color,
                                         u64 *start, u64 *end)
{
        /* Executable buffers can't start or end on a 4GB boundary */
        if (!(color & PANFROST_BO_NOEXEC)) {
                u64 next_seg;

                if ((*start & PFN_4G_MASK) == 0)
                        (*start)++;

                if ((*end & PFN_4G_MASK) == 0)
                        (*end)--;

                next_seg = ALIGN(*start, PFN_4G);
                if (next_seg - *start <= PFN_16M)
                        *start = next_seg + 1;

                *end = min(*end, ALIGN(*start, PFN_4G) - 1);
        }
}

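/*
 * Per-FD open: each DRM file gets its own GPU address space, managed by a
 * drm_mm allocator (with the NOEXEC-aware color_adjust above), its own set
 * of page tables, and its own scheduler entities for job submission.
 */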
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_file_priv *panfrost_priv;

        panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
        if (!panfrost_priv)
                return -ENOMEM;

        panfrost_priv->pfdev = pfdev;
        file->driver_priv = panfrost_priv;

        spin_lock_init(&panfrost_priv->mm_lock);

        /* 4G enough for now. can be 48-bit */
        drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
        panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;

        ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
        if (ret)
                goto err_pgtable;

        ret = panfrost_job_open(panfrost_priv);
        if (ret)
                goto err_job;

        return 0;

err_job:
        panfrost_mmu_pgtable_free(panfrost_priv);
err_pgtable:
        drm_mm_takedown(&panfrost_priv->mm);
        kfree(panfrost_priv);
        return ret;
}

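/*
 * Per-FD close: tear down in the reverse order of panfrost_open() and drop
 * any perfcnt claim this file may still hold.
 */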
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct panfrost_file_priv *panfrost_priv = file->driver_priv;

        panfrost_perfcnt_close(file);
        panfrost_job_close(panfrost_priv);

        panfrost_mmu_pgtable_free(panfrost_priv);
        drm_mm_takedown(&panfrost_priv->mm);
        kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
        DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

        PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static struct drm_driver panfrost_drm_driver = {
        .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
        .open = panfrost_open,
        .postclose = panfrost_postclose,
        .ioctls = panfrost_drm_driver_ioctls,
        .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
        .fops = &panfrost_drm_driver_fops,
        .name = "panfrost",
        .desc = "panfrost DRM",
        .date = "20180908",
        .major = 1,
        .minor = 1,

        .gem_create_object = panfrost_gem_create_object,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
        .gem_prime_mmap = drm_gem_prime_mmap,
};

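/*
 * Platform probe: set up the panfrost_device, devfreq and runtime PM, then
 * register the DRM device with the core.
 */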
static int panfrost_probe(struct platform_device *pdev)
{
        struct panfrost_device *pfdev;
        struct drm_device *ddev;
        int err;

        pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
        if (!pfdev)
                return -ENOMEM;

        pfdev->pdev = pdev;
        pfdev->dev = &pdev->dev;

        platform_set_drvdata(pdev, pfdev);

        /* Allocate and initialize the DRM device. */
        ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        ddev->dev_private = pfdev;
        pfdev->ddev = ddev;

        mutex_init(&pfdev->shrinker_lock);
        INIT_LIST_HEAD(&pfdev->shrinker_list);

        err = panfrost_device_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during GPU init\n");
                goto err_out0;
        }

        err = panfrost_devfreq_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during devfreq init\n");
                goto err_out1;
        }

        pm_runtime_set_active(pfdev->dev);
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_enable(pfdev->dev);
        pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
        pm_runtime_use_autosuspend(pfdev->dev);

        /*
         * Register the DRM device with the core and the connectors with
         * sysfs
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
                goto err_out2;

        panfrost_gem_shrinker_init(ddev);

        return 0;

err_out2:
        pm_runtime_disable(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
err_out1:
        panfrost_device_fini(pfdev);
err_out0:
        drm_dev_put(ddev);
        return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
        struct panfrost_device *pfdev = platform_get_drvdata(pdev);
        struct drm_device *ddev = pfdev->ddev;

        drm_dev_unregister(ddev);
        panfrost_gem_shrinker_cleanup(ddev);

        pm_runtime_get_sync(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
        panfrost_device_fini(pfdev);
        pm_runtime_put_sync_suspend(pfdev->dev);
        pm_runtime_disable(pfdev->dev);

        drm_dev_put(ddev);
        return 0;
}

static const struct of_device_id dt_match[] = {
        { .compatible = "arm,mali-t604" },
        { .compatible = "arm,mali-t624" },
        { .compatible = "arm,mali-t628" },
        { .compatible = "arm,mali-t720" },
        { .compatible = "arm,mali-t760" },
        { .compatible = "arm,mali-t820" },
        { .compatible = "arm,mali-t830" },
        { .compatible = "arm,mali-t860" },
        { .compatible = "arm,mali-t880" },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
        .probe = panfrost_probe,
        .remove = panfrost_remove,
        .driver = {
                .name = "panfrost",
                .pm = &panfrost_pm_ops,
                .of_match_table = dt_match,
        },
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");