/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

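/* Book-keeping for the GPU state captured at hang time, kept around
 * until userspace collects it through the GET_HANG_STATE ioctl.
 */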
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

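/* Returns the hang state recorded by vc4_save_hang_state() to userspace,
 * creating GEM handles for the BOs of the hung job so a debug tool can
 * dump their contents.
 */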
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);
		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

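/* Called from the GPU reset path: records the jobs that were on the
 * binner and renderer queues plus a snapshot of the V3D registers, so
 * that the state can later be fetched with the GET_HANG_STATE ioctl.
 */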
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

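/* Resets the GPU by power-cycling V3D through runtime PM, then rearms
 * the hangcheck timer for whatever job gets kicked off next.
 */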
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

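/* Hangcheck timer callback: if neither the binner nor the renderer has
 * made progress since the last check, assume the GPU is hung and
 * schedule a reset from process context.
 */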
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

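/* Blocks until the given seqno has been retired by the hardware, the
 * timeout expires (-ETIME), or an interruptible wait catches a signal.
 * Callers that must not time out pass ~0ull, e.g. the bin_dep_seqno
 * wait in vc4_get_bcl().
 */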
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

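/* Tags all of the job's BOs with its seqno and attaches the job's fence
 * to their reservation objects (shared for reads, exclusive for the
 * RCL's write targets) so other users of the BOs can wait on the job.
 */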
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

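/* Copies the binner command list, shader records and uniforms in from
 * userspace, validates them, and stores the validated contents in a
 * freshly allocated BO that the hardware will execute from.
 */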
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

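/* Final cleanup for a finished (or failed) job: signals its fence if it
 * wasn't already signaled from the IRQ handler, drops all BO references
 * and use counts, releases the job's bin slots and perfmon, and drops
 * the runtime PM reference taken at submit time.
 */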
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

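/* Drains vc4->job_done_list, completing each job with the job_lock
 * dropped, then schedules any seqno callbacks whose seqno has now been
 * reached.
 */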
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

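/* Registers a callback to be run from a workqueue once the given seqno
 * has been reached; if it already has, the work is scheduled right away.
 */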
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("->pad2 must be set to zero\n");
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

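/* Implements the MADVISE ioctl: userspace marks a BO as DONTNEED
 * (eligible for purging under memory pressure) or WILLNEED again, and
 * learns through ->retained whether the BO's contents are still there.
 */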
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}