/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

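/* Arms the hangcheck timer to fire roughly 100ms from now, so we
 * notice if a queued job stops making progress.
 */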
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mod_timer(&vc4->hangcheck.timer,
                  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
        struct drm_vc4_get_hang_state user_state;

        u32 bo_count;
        struct drm_gem_object **bo;
};

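/* Drops the GEM references held by a saved hang state and frees it. */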
static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
        unsigned int i;

        for (i = 0; i < state->user_state.bo_count; i++)
                drm_gem_object_put(state->bo[i]);

        kfree(state);
}

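/* Returns the hang state saved by vc4_save_hang_state() to userspace:
 * the V3D register dump plus GEM handles for the BOs of the hung jobs.
 * If the user's BO array is too small, only the required bo_count is
 * returned so the ioctl can be retried with a bigger array.
 */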
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vc4_get_hang_state *get_state = data;
        struct drm_vc4_get_hang_state_bo *bo_state;
        struct vc4_hang_state *kernel_state;
        struct drm_vc4_get_hang_state *state;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;
        u32 i;
        int ret = 0;

        if (!vc4->v3d) {
                DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
                return -ENODEV;
        }

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        kernel_state = vc4->hang_state;
        if (!kernel_state) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return -ENOENT;
        }
        state = &kernel_state->user_state;

        /* If the user's array isn't big enough, just return the
         * required array size.
         */
        if (get_state->bo_count < state->bo_count) {
                get_state->bo_count = state->bo_count;
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return 0;
        }

        vc4->hang_state = NULL;
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        /* Save the user's BO pointer, so we don't stomp it with the memcpy. */
        state->bo = get_state->bo;
        memcpy(get_state, state, sizeof(*state));

        bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
        if (!bo_state) {
                ret = -ENOMEM;
                goto err_free;
        }

        for (i = 0; i < state->bo_count; i++) {
                struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
                u32 handle;

                ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
                                            &handle);

                if (ret) {
                        state->bo_count = i;
                        goto err_delete_handle;
                }
                bo_state[i].handle = handle;
                bo_state[i].paddr = vc4_bo->base.paddr;
                bo_state[i].size = vc4_bo->base.base.size;
        }

        if (copy_to_user(u64_to_user_ptr(get_state->bo),
                         bo_state,
                         state->bo_count * sizeof(*bo_state)))
                ret = -EFAULT;

err_delete_handle:
        if (ret) {
                for (i = 0; i < state->bo_count; i++)
                        drm_gem_handle_delete(file_priv, bo_state[i].handle);
        }

err_free:
        vc4_free_hang_state(dev, kernel_state);
        kfree(bo_state);

        return ret;
}

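/* Snapshots the BOs and V3D register state of the hung bin/render
 * jobs so userspace can fetch them later through VC4_GET_HANG_STATE.
 * Called from the reset work right before the GPU is reset.
 */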
static void
vc4_save_hang_state(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_get_hang_state *state;
        struct vc4_hang_state *kernel_state;
        struct vc4_exec_info *exec[2];
        struct vc4_bo *bo;
        unsigned long irqflags;
        unsigned int i, j, k, unref_list_count;

        kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
        if (!kernel_state)
                return;

        state = &kernel_state->user_state;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        exec[0] = vc4_first_bin_job(vc4);
        exec[1] = vc4_first_render_job(vc4);
        if (!exec[0] && !exec[1]) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return;
        }

        /* Get the bos from both binner and renderer into hang state. */
        state->bo_count = 0;
        for (i = 0; i < 2; i++) {
                if (!exec[i])
                        continue;

                unref_list_count = 0;
                list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
                        unref_list_count++;
                state->bo_count += exec[i]->bo_count + unref_list_count;
        }

        kernel_state->bo = kcalloc(state->bo_count,
                                   sizeof(*kernel_state->bo), GFP_ATOMIC);

        if (!kernel_state->bo) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return;
        }

        k = 0;
        for (i = 0; i < 2; i++) {
                if (!exec[i])
                        continue;

                for (j = 0; j < exec[i]->bo_count; j++) {
                        bo = to_vc4_bo(&exec[i]->bo[j]->base);

                        /* Retain BOs just in case they were marked purgeable.
                         * This prevents the BO from being purged before
                         * someone had a chance to dump the hang state.
                         */
                        WARN_ON(!refcount_read(&bo->usecnt));
                        refcount_inc(&bo->usecnt);
                        drm_gem_object_get(&exec[i]->bo[j]->base);
                        kernel_state->bo[k++] = &exec[i]->bo[j]->base;
                }

                list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
                        /* No need to retain BOs coming from the ->unref_list
                         * because they are naturally unpurgeable.
                         */
                        drm_gem_object_get(&bo->base.base);
                        kernel_state->bo[k++] = &bo->base.base;
                }
        }

        WARN_ON_ONCE(k != state->bo_count);

        if (exec[0])
                state->start_bin = exec[0]->ct0ca;
        if (exec[1])
                state->start_render = exec[1]->ct1ca;

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        state->ct0ca = V3D_READ(V3D_CTNCA(0));
        state->ct0ea = V3D_READ(V3D_CTNEA(0));

        state->ct1ca = V3D_READ(V3D_CTNCA(1));
        state->ct1ea = V3D_READ(V3D_CTNEA(1));

        state->ct0cs = V3D_READ(V3D_CTNCS(0));
        state->ct1cs = V3D_READ(V3D_CTNCS(1));

        state->ct0ra0 = V3D_READ(V3D_CT00RA0);
        state->ct1ra0 = V3D_READ(V3D_CT01RA0);

        state->bpca = V3D_READ(V3D_BPCA);
        state->bpcs = V3D_READ(V3D_BPCS);
        state->bpoa = V3D_READ(V3D_BPOA);
        state->bpos = V3D_READ(V3D_BPOS);

        state->vpmbase = V3D_READ(V3D_VPMBASE);

        state->dbge = V3D_READ(V3D_DBGE);
        state->fdbgo = V3D_READ(V3D_FDBGO);
        state->fdbgb = V3D_READ(V3D_FDBGB);
        state->fdbgr = V3D_READ(V3D_FDBGR);
        state->fdbgs = V3D_READ(V3D_FDBGS);
        state->errstat = V3D_READ(V3D_ERRSTAT);

        /* We need to turn purgeable BOs into unpurgeable ones so that
         * userspace has a chance to dump the hang state before the kernel
         * decides to purge those BOs.
         * Note that BO consistency at dump time cannot be guaranteed. For
         * example, if the owner of these BOs decides to re-use them or mark
         * them purgeable again there's nothing we can do to prevent it.
         */
        for (i = 0; i < kernel_state->user_state.bo_count; i++) {
                struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

                if (bo->madv == __VC4_MADV_NOTSUPP)
                        continue;

                mutex_lock(&bo->madv_lock);
                if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
                        bo->madv = VC4_MADV_WILLNEED;
                refcount_dec(&bo->usecnt);
                mutex_unlock(&bo->madv_lock);
        }

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        if (vc4->hang_state) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                vc4_free_hang_state(dev, kernel_state);
        } else {
                vc4->hang_state = kernel_state;
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        }
}

static void
vc4_reset(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        DRM_INFO("Resetting GPU.\n");

        mutex_lock(&vc4->power_lock);
        if (vc4->power_refcount) {
                /* Power the device off and back on by dropping the
                 * reference on runtime PM.
                 */
                pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
                pm_runtime_get_sync(&vc4->v3d->pdev->dev);
        }
        mutex_unlock(&vc4->power_lock);

        vc4_irq_reset(dev);

        /* Rearm the hangcheck -- another job might have been waiting
         * for our hung one to get kicked off, and vc4_irq_reset()
         * would have started it.
         */
        vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, hangcheck.reset_work);

        vc4_save_hang_state(&vc4->base);

        vc4_reset(&vc4->base);
}

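/* Hangcheck timer callback.  If the current bin/render jobs have made
 * progress since the last check, the timer is simply rearmed; if not,
 * a GPU reset is scheduled through a work item, since resetting can
 * sleep and a timer callback can't.
 */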
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
        struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
        struct drm_device *dev = &vc4->base;
        uint32_t ct0ca, ct1ca;
        unsigned long irqflags;
        struct vc4_exec_info *bin_exec, *render_exec;

        spin_lock_irqsave(&vc4->job_lock, irqflags);

        bin_exec = vc4_first_bin_job(vc4);
        render_exec = vc4_first_render_job(vc4);

        /* If idle, we can stop watching for hangs. */
        if (!bin_exec && !render_exec) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return;
        }

        ct0ca = V3D_READ(V3D_CTNCA(0));
        ct1ca = V3D_READ(V3D_CTNCA(1));

        /* If we've made any progress in execution, rearm the timer
         * and wait.
         */
        if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
            (render_exec && ct1ca != render_exec->last_ct1ca)) {
                if (bin_exec)
                        bin_exec->last_ct0ca = ct0ca;
                if (render_exec)
                        render_exec->last_ct1ca = ct1ca;
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                vc4_queue_hangcheck(dev);
                return;
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        /* We've gone too long with no progress, reset.  This has to
         * be done from a work struct, since resetting can sleep and
         * this timer hook isn't allowed to.
         */
        schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Set the current and end address of the control list.
         * Writing the end register is what starts the job.
         */
        V3D_WRITE(V3D_CTNCA(thread), start);
        V3D_WRITE(V3D_CTNEA(thread), end);
}

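/* Blocks until vc4->finished_seqno reaches @seqno or @timeout_ns
 * expires.  A @timeout_ns of ~0ull means wait forever.
 */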
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
                   bool interruptible)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret = 0;
        unsigned long timeout_expire;
        DEFINE_WAIT(wait);

        if (vc4->finished_seqno >= seqno)
                return 0;

        if (timeout_ns == 0)
                return -ETIME;

        timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

        trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
        for (;;) {
                prepare_to_wait(&vc4->job_wait_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE :
                                TASK_UNINTERRUPTIBLE);

                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (vc4->finished_seqno >= seqno)
                        break;

                if (timeout_ns != ~0ull) {
                        if (time_after_eq(jiffies, timeout_expire)) {
                                ret = -ETIME;
                                break;
                        }
                        schedule_timeout(timeout_expire - jiffies);
                } else {
                        schedule();
                }
        }

        finish_wait(&vc4->job_wait_queue, &wait);
        trace_vc4_wait_for_seqno_end(dev, seqno);

        return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Flush the GPU L2 caches.  These caches sit on top of system
         * L3 (the 128kb or so shared with the CPU), and are
         * non-allocating in the L3.
         */
        V3D_WRITE(V3D_L2CACTL,
                  V3D_L2CACTL_L2CCLR);

        V3D_WRITE(V3D_SLCACTL,
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        V3D_WRITE(V3D_L2CACTL,
                  V3D_L2CACTL_L2CCLR);

        V3D_WRITE(V3D_SLCACTL,
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *exec;

again:
        exec = vc4_first_bin_job(vc4);
        if (!exec)
                return;

        vc4_flush_caches(dev);

        /* Only start the perfmon if it was not already started by a previous
         * job.
         */
        if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
                vc4_perfmon_start(vc4, exec->perfmon);

        /* Either put the job in the binner if it uses the binner, or
         * immediately move it to the to-be-rendered queue.
         */
        if (exec->ct0ca != exec->ct0ea) {
                submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
        } else {
                struct vc4_exec_info *next;

                vc4_move_job_to_render(dev, exec);
                next = vc4_first_bin_job(vc4);

                /* We can't start the next bin job if the previous job had a
                 * different perfmon instance attached to it. The same goes
                 * if one of them had a perfmon attached to it and the other
                 * one doesn't.
                 */
                if (next && next->perfmon == exec->perfmon)
                        goto again;
        }
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *exec = vc4_first_render_job(vc4);

        if (!exec)
                return;

        /* A previous RCL may have written to one of our textures, and
         * our full cache flush at bin time may have occurred before
         * that RCL completed.  Flush the texture cache now, but not
         * the instructions or uniforms (since we don't write those
         * from an RCL).
         */
        vc4_flush_texture_caches(dev);

        submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        bool was_empty = list_empty(&vc4->render_job_list);

        list_move_tail(&exec->head, &vc4->render_job_list);
        if (was_empty)
                vc4_submit_next_render_job(dev);
}

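/* Stamps the job's seqno on all of its BOs and attaches the job's
 * fence to their reservation objects (shared for BOs the job reads,
 * exclusive for BOs the RCL writes) so implicit synchronization works.
 */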
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
        struct vc4_bo *bo;
        unsigned i;

        for (i = 0; i < exec->bo_count; i++) {
                bo = to_vc4_bo(&exec->bo[i]->base);
                bo->seqno = seqno;

                dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
        }

        list_for_each_entry(bo, &exec->unref_list, unref_head) {
                bo->seqno = seqno;
        }

        for (i = 0; i < exec->rcl_write_bo_count; i++) {
                bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
                bo->write_seqno = seqno;

                dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
        }
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
                           struct vc4_exec_info *exec,
                           struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < exec->bo_count; i++) {
                struct drm_gem_object *bo = &exec->bo[i]->base;

                dma_resv_unlock(bo->resv);
        }

        ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
                         struct vc4_exec_info *exec,
                         struct ww_acquire_ctx *acquire_ctx)
{
        int contended_lock = -1;
        int i, ret;
        struct drm_gem_object *bo;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended_lock != -1) {
                bo = &exec->bo[contended_lock]->base;
                ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < exec->bo_count; i++) {
                if (i == contended_lock)
                        continue;

                bo = &exec->bo[i]->base;

                ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++) {
                                bo = &exec->bo[j]->base;
                                dma_resv_unlock(bo->resv);
                        }

                        if (contended_lock != -1 && contended_lock >= i) {
                                bo = &exec->bo[contended_lock]->base;

                                dma_resv_unlock(bo->resv);
                        }

                        if (ret == -EDEADLK) {
                                contended_lock = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        /* Reserve space for our shared (read-only) fence references,
         * before we commit the CL to the hardware.
         */
        for (i = 0; i < exec->bo_count; i++) {
                bo = &exec->bo[i]->base;

                ret = dma_resv_reserve_shared(bo->resv, 1);
                if (ret) {
                        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
                        return ret;
                }
        }

        return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
                 struct ww_acquire_ctx *acquire_ctx,
                 struct drm_syncobj *out_sync)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *renderjob;
        uint64_t seqno;
        unsigned long irqflags;
        struct vc4_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        fence->dev = dev;

        spin_lock_irqsave(&vc4->job_lock, irqflags);

        seqno = ++vc4->emit_seqno;
        exec->seqno = seqno;

        dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
                       vc4->dma_fence_context, exec->seqno);
        fence->seqno = exec->seqno;
        exec->fence = &fence->base;

        if (out_sync)
                drm_syncobj_replace_fence(out_sync, exec->fence);

        vc4_update_bo_seqnos(exec, seqno);

        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

        list_add_tail(&exec->head, &vc4->bin_job_list);

        /* If no bin job was executing and if the render job (if any) has the
         * same perfmon as our job attached to it (or if both jobs don't have
         * perfmon activated), then kick ours off.  Otherwise, it'll get
         * started when the previous job's flush/render done interrupt occurs.
         */
        renderjob = vc4_first_render_job(vc4);
        if (vc4_first_bin_job(vc4) == exec &&
            (!renderjob || renderjob->perfmon == exec->perfmon)) {
                vc4_submit_next_bin_job(dev);
                vc4_queue_hangcheck(dev);
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct vc4_exec_info *exec)
{
        struct drm_vc4_submit_cl *args = exec->args;
        uint32_t *handles;
        int ret = 0;
        int i;

        exec->bo_count = args->bo_handle_count;

        if (!exec->bo_count) {
                /* See comment on bo_index for why we have to check
                 * this.
                 */
                DRM_DEBUG("Rendering requires BOs to validate\n");
                return -EINVAL;
        }

        exec->bo = kvmalloc_array(exec->bo_count,
                                  sizeof(struct drm_gem_cma_object *),
                                  GFP_KERNEL | __GFP_ZERO);
        if (!exec->bo) {
                DRM_ERROR("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
        }

        handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_ERROR("Failed to allocate incoming GEM handles\n");
                goto fail;
        }

        if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
                           exec->bo_count * sizeof(uint32_t))) {
                ret = -EFAULT;
                DRM_ERROR("Failed to copy in GEM handles\n");
                goto fail;
        }

        spin_lock(&file_priv->table_lock);
        for (i = 0; i < exec->bo_count; i++) {
                struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
                                                     handles[i]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
                                  i, handles[i]);
                        ret = -EINVAL;
                        break;
                }

                drm_gem_object_get(bo);
                exec->bo[i] = (struct drm_gem_cma_object *)bo;
        }
        spin_unlock(&file_priv->table_lock);

        if (ret)
                goto fail_put_bo;

        for (i = 0; i < exec->bo_count; i++) {
                ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
                if (ret)
                        goto fail_dec_usecnt;
        }

        kvfree(handles);
        return 0;

fail_dec_usecnt:
        /* Decrease usecnt on acquired objects.
         * We cannot rely on vc4_complete_exec() to release resources here,
         * because vc4_complete_exec() has no information about which BO has
         * had its ->usecnt incremented.
         * To make things easier we just free everything explicitly and set
         * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
         * step.
         */
        for (i--; i >= 0; i--)
                vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
        /* Release any reference to acquired objects. */
        for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
                drm_gem_object_put(&exec->bo[i]->base);

fail:
        kvfree(handles);
        kvfree(exec->bo);
        exec->bo = NULL;
        return ret;
}

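/* Copies the binner command list, shader records and uniforms in from
 * userspace, validates them, and relocates them into a newly allocated
 * BO that the binner will actually execute from.
 */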
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
        struct drm_vc4_submit_cl *args = exec->args;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        void *temp = NULL;
        void *bin;
        int ret = 0;
        uint32_t bin_offset = 0;
        uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
                                             16);
        uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
        uint32_t exec_size = uniforms_offset + args->uniforms_size;
        uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
                                          args->shader_rec_count);
        struct vc4_bo *bo;

        if (shader_rec_offset < args->bin_cl_size ||
            uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                          sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_DEBUG("overflow in exec arguments\n");
                ret = -EINVAL;
                goto fail;
        }

        /* Allocate space where we'll store the copied in user command lists
         * and shader records.
         *
         * We don't just copy directly into the BOs because we need to
         * read the contents back for validation, and I think the
         * bo->vaddr is uncached access.
         */
        temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
        if (!temp) {
                DRM_ERROR("Failed to allocate storage for copying "
                          "in bin/render CLs.\n");
                ret = -ENOMEM;
                goto fail;
        }
        bin = temp + bin_offset;
        exec->shader_rec_u = temp + shader_rec_offset;
        exec->uniforms_u = temp + uniforms_offset;
        exec->shader_state = temp + exec_size;
        exec->shader_state_size = args->shader_rec_count;

        if (copy_from_user(bin,
                           u64_to_user_ptr(args->bin_cl),
                           args->bin_cl_size)) {
                ret = -EFAULT;
                goto fail;
        }

        if (copy_from_user(exec->shader_rec_u,
                           u64_to_user_ptr(args->shader_rec),
                           args->shader_rec_size)) {
                ret = -EFAULT;
                goto fail;
        }

        if (copy_from_user(exec->uniforms_u,
                           u64_to_user_ptr(args->uniforms),
                           args->uniforms_size)) {
                ret = -EFAULT;
                goto fail;
        }

        bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
        if (IS_ERR(bo)) {
                DRM_ERROR("Couldn't allocate BO for binning\n");
                ret = PTR_ERR(bo);
                goto fail;
        }
        exec->exec_bo = &bo->base;

        list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
                      &exec->unref_list);

        exec->ct0ca = exec->exec_bo->paddr + bin_offset;

        exec->bin_u = bin;

        exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
        exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
        exec->shader_rec_size = args->shader_rec_size;

        exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
        exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
        exec->uniforms_size = args->uniforms_size;

        ret = vc4_validate_bin_cl(dev,
                                  exec->exec_bo->vaddr + bin_offset,
                                  bin,
                                  exec);
        if (ret)
                goto fail;

        ret = vc4_validate_shader_recs(dev, exec);
        if (ret)
                goto fail;

        if (exec->found_tile_binning_mode_config_packet) {
                ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
                if (ret)
                        goto fail;
        }

        /* Block waiting on any previous rendering into the CS's VBO,
         * IB, or textures, so that pixels are actually written by the
         * time we try to read them.
         */
        ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
        kvfree(temp);
        return ret;
}

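/* Tears down a finished (or failed) job: signals and drops its fence,
 * releases its BO references and use counts, returns its bin slots,
 * and drops the perfmon and runtime-PM references.
 */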
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;
        unsigned i;

        /* If we got force-completed because of GPU reset rather than
         * through our IRQ handler, signal the fence now.
         */
        if (exec->fence) {
                dma_fence_signal(exec->fence);
                dma_fence_put(exec->fence);
        }

        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++) {
                        struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

                        vc4_bo_dec_usecnt(bo);
                        drm_gem_object_put(&exec->bo[i]->base);
                }
                kvfree(exec->bo);
        }

        while (!list_empty(&exec->unref_list)) {
                struct vc4_bo *bo = list_first_entry(&exec->unref_list,
                                                     struct vc4_bo, unref_head);
                list_del(&bo->unref_head);
                drm_gem_object_put(&bo->base.base);
        }

        /* Free up the allocation of any bin slots we used. */
        spin_lock_irqsave(&vc4->job_lock, irqflags);
        vc4->bin_alloc_used &= ~exec->bin_slots;
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        /* Release the reference on the binner BO if needed. */
        if (exec->bin_bo_used)
                vc4_v3d_bin_bo_put(vc4);

        /* Release the reference we had on the perf monitor. */
        vc4_perfmon_put(exec->perfmon);

        vc4_v3d_pm_put(vc4);

        kfree(exec);
}

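/* Walks job_done_list, tearing down the exec structs of completed
 * jobs, and schedules any seqno callbacks whose seqno has now been
 * reached.
 */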
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
        unsigned long irqflags;
        struct vc4_seqno_cb *cb, *cb_temp;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        while (!list_empty(&vc4->job_done_list)) {
                struct vc4_exec_info *exec =
                        list_first_entry(&vc4->job_done_list,
                                         struct vc4_exec_info, head);
                list_del(&exec->head);

                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                vc4_complete_exec(&vc4->base, exec);
                spin_lock_irqsave(&vc4->job_lock, irqflags);
        }

        list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
                if (cb->seqno <= vc4->finished_seqno) {
                        list_del_init(&cb->work.entry);
                        schedule_work(&cb->work);
                }
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
        struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

        cb->func(cb);
}

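/* Registers a callback to be run (from a workqueue) once @seqno has
 * been reached; if it already has been, the work is scheduled right
 * away.
 */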
int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb))
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;

        cb->func = func;
        INIT_WORK(&cb->work, vc4_seqno_cb_work);

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        if (seqno > vc4->finished_seqno) {
                cb->seqno = seqno;
                list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
        } else {
                schedule_work(&cb->work);
        }
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        return 0;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, job_done_work);

        vc4_job_handle_completed(vc4);
}

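/* Shared helper for the wait ioctls: waits for @seqno and, if the wait
 * was interrupted, subtracts the time already spent from *timeout_ns
 * so the ioctl can be restarted.
 */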
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
                                uint64_t seqno,
                                uint64_t *timeout_ns)
{
        unsigned long start = jiffies;
        int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

        if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
                uint64_t delta = jiffies_to_nsecs(jiffies - start);

                if (*timeout_ns >= delta)
                        *timeout_ns -= delta;
        }

        return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_vc4_wait_seqno *args = data;

        return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
                                               &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        int ret;
        struct drm_vc4_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;

        if (args->pad != 0)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -EINVAL;
        }
        bo = to_vc4_bo(gem_obj);

        ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
                                              &args->timeout_ns);

        drm_gem_object_put(gem_obj);
        return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_file *vc4file = file_priv->driver_priv;
        struct drm_vc4_submit_cl *args = data;
        struct drm_syncobj *out_sync = NULL;
        struct vc4_exec_info *exec;
        struct ww_acquire_ctx acquire_ctx;
        struct dma_fence *in_fence;
        int ret = 0;

        if (!vc4->v3d) {
                DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
                return -ENODEV;
        }

        if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
                             VC4_SUBMIT_CL_FIXED_RCL_ORDER |
                             VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
                             VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
                DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
                return -EINVAL;
        }

        if (args->pad2 != 0) {
                DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
                return -EINVAL;
        }

        exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
        if (!exec) {
                DRM_ERROR("malloc failure on exec struct\n");
                return -ENOMEM;
        }

        ret = vc4_v3d_pm_get(vc4);
        if (ret) {
                kfree(exec);
                return ret;
        }

        exec->args = args;
        INIT_LIST_HEAD(&exec->unref_list);

        ret = vc4_cl_lookup_bos(dev, file_priv, exec);
        if (ret)
                goto fail;

        if (args->perfmonid) {
                exec->perfmon = vc4_perfmon_find(vc4file,
                                                 args->perfmonid);
                if (!exec->perfmon) {
                        ret = -ENOENT;
                        goto fail;
                }
        }

        if (args->in_sync) {
                ret = drm_syncobj_find_fence(file_priv, args->in_sync,
                                             0, 0, &in_fence);
                if (ret)
                        goto fail;

                /* When the fence (or fence array) is exclusively from our
                 * context we can skip the wait since jobs are executed in
                 * order of their submission through this ioctl and this can
                 * only have fences from a prior job.
                 */
                if (!dma_fence_match_context(in_fence,
                                             vc4->dma_fence_context)) {
                        ret = dma_fence_wait(in_fence, true);
                        if (ret) {
                                dma_fence_put(in_fence);
                                goto fail;
                        }
                }

                dma_fence_put(in_fence);
        }

        if (exec->args->bin_cl_size != 0) {
                ret = vc4_get_bcl(dev, exec);
                if (ret)
                        goto fail;
        } else {
                exec->ct0ca = 0;
                exec->ct0ea = 0;
        }

        ret = vc4_get_rcl(dev, exec);
        if (ret)
                goto fail;

        ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
        if (ret)
                goto fail;

        if (args->out_sync) {
                out_sync = drm_syncobj_find(file_priv, args->out_sync);
                if (!out_sync) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* We replace the fence in out_sync in vc4_queue_submit since
                 * the render job could execute immediately after that call.
                 * If it finishes before our ioctl processing resumes the
                 * render job fence could already have been freed.
                 */
        }

        /* Clear this out of the struct we'll be putting in the queue,
         * since it's part of our stack.
         */
        exec->args = NULL;

        ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

        /* The syncobj isn't part of the exec data and we need to free our
         * reference even if job submission failed.
         */
        if (out_sync)
                drm_syncobj_put(out_sync);

        if (ret)
                goto fail;

        /* Return the seqno for our job. */
        args->seqno = vc4->emit_seqno;

        return 0;

fail:
        vc4_complete_exec(&vc4->base, exec);

        return ret;
}

static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        vc4->dma_fence_context = dma_fence_context_alloc(1);

        INIT_LIST_HEAD(&vc4->bin_job_list);
        INIT_LIST_HEAD(&vc4->render_job_list);
        INIT_LIST_HEAD(&vc4->job_done_list);
        INIT_LIST_HEAD(&vc4->seqno_cb_list);
        spin_lock_init(&vc4->job_lock);

        INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
        timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

        INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

        mutex_init(&vc4->power_lock);

        INIT_LIST_HEAD(&vc4->purgeable.list);
        mutex_init(&vc4->purgeable.lock);

        return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}

static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Waiting for exec to finish would need to be done before
         * unregistering V3D.
         */
        WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

        /* V3D should already have disabled its interrupt and cleared
         * the overflow allocation registers.  Now free the object.
         */
        if (vc4->bin_bo) {
                drm_gem_object_put(&vc4->bin_bo->base.base);
                vc4->bin_bo = NULL;
        }

        if (vc4->hang_state)
                vc4_free_hang_state(dev, vc4->hang_state);
}

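/* Lets userspace mark a BO as purgeable (VC4_MADV_DONTNEED) or needed
 * again (VC4_MADV_WILLNEED), and reports back in args->retained
 * whether the BO's contents have survived (i.e. it hasn't been purged).
 */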
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_vc4_gem_madvise *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        int ret;

        switch (args->madv) {
        case VC4_MADV_DONTNEED:
        case VC4_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        if (args->pad != 0)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        bo = to_vc4_bo(gem_obj);

        /* Only BOs exposed to userspace can be purged. */
        if (bo->madv == __VC4_MADV_NOTSUPP) {
                DRM_DEBUG("madvise not supported on this BO\n");
                ret = -EINVAL;
                goto out_put_gem;
        }

        /* Not sure it's safe to purge imported BOs. Let's just assume it's
         * not until proven otherwise.
         */
        if (gem_obj->import_attach) {
                DRM_DEBUG("madvise not supported on imported BOs\n");
                ret = -EINVAL;
                goto out_put_gem;
        }

        mutex_lock(&bo->madv_lock);

        if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
            !refcount_read(&bo->usecnt)) {
                /* If the BO is about to be marked as purgeable, is not used
                 * and is not already purgeable or purged, add it to the
                 * purgeable list.
                 */
                vc4_bo_add_to_purgeable_pool(bo);
        } else if (args->madv == VC4_MADV_WILLNEED &&
                   bo->madv == VC4_MADV_DONTNEED &&
                   !refcount_read(&bo->usecnt)) {
                /* The BO has not been purged yet, just remove it from
                 * the purgeable list.
                 */
                vc4_bo_remove_from_purgeable_pool(bo);
        }

        /* Save the purged state. */
        args->retained = bo->madv != __VC4_MADV_PURGED;

        /* Update internal madv state only if the bo was not purged. */
        if (bo->madv != __VC4_MADV_PURGED)
                bo->madv = args->madv;

        mutex_unlock(&bo->madv_lock);

        ret = 0;

out_put_gem:
        drm_gem_object_put(gem_obj);

        return ret;
}