drivers/gpu/drm/ttm/ttm_bo.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/dma-resv.h>
44
45 #include "ttm_module.h"
46
47 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
48                                         struct ttm_placement *placement)
49 {
50         struct drm_printer p = drm_debug_printer(TTM_PFX);
51         struct ttm_resource_manager *man;
52         int i, mem_type;
53
54         for (i = 0; i < placement->num_placement; i++) {
55                 mem_type = placement->placement[i].mem_type;
56                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
57                            i, placement->placement[i].flags, mem_type);
58                 man = ttm_manager_type(bo->bdev, mem_type);
59                 ttm_resource_manager_debug(man, &p);
60         }
61 }
62
63 /**
64  * ttm_bo_move_to_lru_tail
65  *
66  * @bo: The buffer object.
67  *
68  * Move this BO to the tail of all lru lists used to look up and reserve an
69  * object. This function must be called with struct ttm_device::lru_lock
70  * held, and is used to make a BO less likely to be considered for eviction.
71  */
72 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
73 {
74         dma_resv_assert_held(bo->base.resv);
75
76         if (bo->resource)
77                 ttm_resource_move_to_lru_tail(bo->resource);
78 }
79 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
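
/*
 * Illustrative sketch only (not called from this file): a driver that just
 * touched a BO and wants it to survive eviction a little longer can bump it
 * to the LRU tail. The reservation must already be held as documented above,
 * and the device's lru_lock protects the list itself.
 *
 *	dma_resv_assert_held(bo->base.resv);
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */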
80
81 /**
82  * ttm_bo_set_bulk_move - update BOs bulk move object
83  *
84  * @bo: The buffer object.
85  *
86  * Update the BO's bulk move object, making sure that resources are added/removed
87  * as well. A bulk move allows moving many resources on the LRU at once,
88  * resulting in much less overhead for maintaining the LRU.
89  * The only requirement is that the resources stay together on the LRU and are
90  * never separated. This is enforced by setting the bulk_move structure on a BO.
91  * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
92  * their LRU list.
93  */
94 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
95                           struct ttm_lru_bulk_move *bulk)
96 {
97         dma_resv_assert_held(bo->base.resv);
98
99         if (bo->bulk_move == bulk)
100                 return;
101
102         spin_lock(&bo->bdev->lru_lock);
103         if (bo->resource)
104                 ttm_resource_del_bulk_move(bo->resource, bo);
105         bo->bulk_move = bulk;
106         if (bo->resource)
107                 ttm_resource_add_bulk_move(bo->resource, bo);
108         spin_unlock(&bo->bdev->lru_lock);
109 }
110 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
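
/*
 * Illustrative sketch only: a driver can group the BOs belonging to one
 * client in a bulk move object (here a hypothetical "vm" structure embedding
 * a struct ttm_lru_bulk_move) so the whole group is bumped on the LRU in one
 * go with ttm_lru_bulk_move_tail().
 *
 *	// attach the BO to the group, with the reservation held
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	// later, bump the whole group to the LRU tail at once
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 *	spin_unlock(&bo->bdev->lru_lock);
 */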
111
112 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
113                                   struct ttm_resource *mem, bool evict,
114                                   struct ttm_operation_ctx *ctx,
115                                   struct ttm_place *hop)
116 {
117         struct ttm_device *bdev = bo->bdev;
118         bool old_use_tt, new_use_tt;
119         int ret;
120
121         old_use_tt = bo->resource &&
122                 ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
123         new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
124
125         ttm_bo_unmap_virtual(bo);
126
127         /*
128          * Create and bind a ttm if required.
129          */
130
131         if (new_use_tt) {
132                 /* Zero init the new TTM structure if the old location should
133                  * have used one as well.
134                  */
135                 ret = ttm_tt_create(bo, old_use_tt);
136                 if (ret)
137                         goto out_err;
138
139                 if (mem->mem_type != TTM_PL_SYSTEM) {
140                         ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
141                         if (ret)
142                                 goto out_err;
143                 }
144         }
145
146         ret = dma_resv_reserve_fences(bo->base.resv, 1);
147         if (ret)
148                 goto out_err;
149
150         ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
151         if (ret) {
152                 if (ret == -EMULTIHOP)
153                         return ret;
154                 goto out_err;
155         }
156
157         ctx->bytes_moved += bo->base.size;
158         return 0;
159
160 out_err:
161         if (!old_use_tt)
162                 ttm_bo_tt_destroy(bo);
163
164         return ret;
165 }
166
167 /*
168  * Called with bo::reserved held.
169  * Will release GPU memory type usage on destruction.
170  * This is the place to put in driver specific hooks to release
171  * driver private resources.
172  * The caller releases the bo::reserved lock afterwards.
173  */
174
175 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
176 {
177         if (bo->bdev->funcs->delete_mem_notify)
178                 bo->bdev->funcs->delete_mem_notify(bo);
179
180         ttm_bo_tt_destroy(bo);
181         ttm_resource_free(bo, &bo->resource);
182 }
183
184 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
185 {
186         int r;
187
188         if (bo->base.resv == &bo->base._resv)
189                 return 0;
190
191         BUG_ON(!dma_resv_trylock(&bo->base._resv));
192
193         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
194         dma_resv_unlock(&bo->base._resv);
195         if (r)
196                 return r;
197
198         if (bo->type != ttm_bo_type_sg) {
199                 /* This works because the BO is about to be destroyed and nobody
200                  * references it any more. The only tricky case is the trylock on
201                  * the resv object while holding the lru_lock.
202                  */
203                 spin_lock(&bo->bdev->lru_lock);
204                 bo->base.resv = &bo->base._resv;
205                 spin_unlock(&bo->bdev->lru_lock);
206         }
207
208         return r;
209 }
210
211 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
212 {
213         struct dma_resv *resv = &bo->base._resv;
214         struct dma_resv_iter cursor;
215         struct dma_fence *fence;
216
217         dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
218         dma_resv_for_each_fence_unlocked(&cursor, fence) {
219                 if (!fence->ops->signaled)
220                         dma_fence_enable_sw_signaling(fence);
221         }
222         dma_resv_iter_end(&cursor);
223 }
224
225 /**
226  * ttm_bo_cleanup_refs
227  * If the bo is idle, remove it from the lru lists and unref it.
228  * If not idle, block if possible.
229  *
230  * Must be called with the lru_lock and the reservation held; this function
231  * will drop the lru lock and optionally the reservation lock before returning.
232  *
233  * @bo:                    The buffer object to clean-up
234  * @interruptible:         Any sleeps should occur interruptibly.
235  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
236  * @unlock_resv:           Unlock the reservation lock as well.
237  */
238
239 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
240                                bool interruptible, bool no_wait_gpu,
241                                bool unlock_resv)
242 {
243         struct dma_resv *resv = &bo->base._resv;
244         int ret;
245
246         if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
247                 ret = 0;
248         else
249                 ret = -EBUSY;
250
251         if (ret && !no_wait_gpu) {
252                 long lret;
253
254                 if (unlock_resv)
255                         dma_resv_unlock(bo->base.resv);
256                 spin_unlock(&bo->bdev->lru_lock);
257
258                 lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
259                                              interruptible,
260                                              30 * HZ);
261
262                 if (lret < 0)
263                         return lret;
264                 else if (lret == 0)
265                         return -EBUSY;
266
267                 spin_lock(&bo->bdev->lru_lock);
268                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
269                         /*
270                          * We raced and lost: someone else holds the reservation now,
271                          * and is probably busy in ttm_bo_cleanup_memtype_use.
272                          *
273                          * Even if that's not the case, any delayed destruction would
274                          * succeed because we finished waiting, so just return success
275                          * here.
276                          */
277                         spin_unlock(&bo->bdev->lru_lock);
278                         return 0;
279                 }
280                 ret = 0;
281         }
282
283         if (ret || unlikely(list_empty(&bo->ddestroy))) {
284                 if (unlock_resv)
285                         dma_resv_unlock(bo->base.resv);
286                 spin_unlock(&bo->bdev->lru_lock);
287                 return ret;
288         }
289
290         list_del_init(&bo->ddestroy);
291         spin_unlock(&bo->bdev->lru_lock);
292         ttm_bo_cleanup_memtype_use(bo);
293
294         if (unlock_resv)
295                 dma_resv_unlock(bo->base.resv);
296
297         ttm_bo_put(bo);
298
299         return 0;
300 }
301
302 /*
303  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
304  * encountered buffers.
305  */
306 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
307 {
308         struct list_head removed;
309         bool empty;
310
311         INIT_LIST_HEAD(&removed);
312
313         spin_lock(&bdev->lru_lock);
314         while (!list_empty(&bdev->ddestroy)) {
315                 struct ttm_buffer_object *bo;
316
317                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
318                                       ddestroy);
319                 list_move_tail(&bo->ddestroy, &removed);
320                 if (!ttm_bo_get_unless_zero(bo))
321                         continue;
322
323                 if (remove_all || bo->base.resv != &bo->base._resv) {
324                         spin_unlock(&bdev->lru_lock);
325                         dma_resv_lock(bo->base.resv, NULL);
326
327                         spin_lock(&bdev->lru_lock);
328                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
329
330                 } else if (dma_resv_trylock(bo->base.resv)) {
331                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
332                 } else {
333                         spin_unlock(&bdev->lru_lock);
334                 }
335
336                 ttm_bo_put(bo);
337                 spin_lock(&bdev->lru_lock);
338         }
339         list_splice_tail(&removed, &bdev->ddestroy);
340         empty = list_empty(&bdev->ddestroy);
341         spin_unlock(&bdev->lru_lock);
342
343         return empty;
344 }
345
346 static void ttm_bo_release(struct kref *kref)
347 {
348         struct ttm_buffer_object *bo =
349             container_of(kref, struct ttm_buffer_object, kref);
350         struct ttm_device *bdev = bo->bdev;
351         int ret;
352
353         WARN_ON_ONCE(bo->pin_count);
354         WARN_ON_ONCE(bo->bulk_move);
355
356         if (!bo->deleted) {
357                 ret = ttm_bo_individualize_resv(bo);
358                 if (ret) {
359                         /* Last resort: if we fail to allocate memory for the
360                          * fences, block until the BO becomes idle.
361                          */
362                         dma_resv_wait_timeout(bo->base.resv,
363                                               DMA_RESV_USAGE_BOOKKEEP, false,
364                                               30 * HZ);
365                 }
366
367                 if (bo->bdev->funcs->release_notify)
368                         bo->bdev->funcs->release_notify(bo);
369
370                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
371                 ttm_mem_io_free(bdev, bo->resource);
372         }
373
374         if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
375             !dma_resv_trylock(bo->base.resv)) {
376                 /* The BO is not idle, resurrect it for delayed destroy */
377                 ttm_bo_flush_all_fences(bo);
378                 bo->deleted = true;
379
380                 spin_lock(&bo->bdev->lru_lock);
381
382                 /*
383                  * Make pinned bos immediately available to
384                  * shrinkers, now that they are queued for
385                  * destruction.
386                  *
387                  * FIXME: QXL is triggering this. Can be removed when the
388                  * driver is fixed.
389                  */
390                 if (bo->pin_count) {
391                         bo->pin_count = 0;
392                         ttm_resource_move_to_lru_tail(bo->resource);
393                 }
394
395                 kref_init(&bo->kref);
396                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
397                 spin_unlock(&bo->bdev->lru_lock);
398
399                 schedule_delayed_work(&bdev->wq,
400                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
401                 return;
402         }
403
404         spin_lock(&bo->bdev->lru_lock);
405         list_del(&bo->ddestroy);
406         spin_unlock(&bo->bdev->lru_lock);
407
408         ttm_bo_cleanup_memtype_use(bo);
409         dma_resv_unlock(bo->base.resv);
410
411         atomic_dec(&ttm_glob.bo_count);
412         bo->destroy(bo);
413 }
414
415 void ttm_bo_put(struct ttm_buffer_object *bo)
416 {
417         kref_put(&bo->kref, ttm_bo_release);
418 }
419 EXPORT_SYMBOL(ttm_bo_put);
420
421 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
422 {
423         return cancel_delayed_work_sync(&bdev->wq);
424 }
425 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
426
427 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
428 {
429         if (resched)
430                 schedule_delayed_work(&bdev->wq,
431                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
432 }
433 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
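
/*
 * Illustrative sketch only: drivers typically pause the delayed-destroy
 * worker around suspend or GPU reset and restore it afterwards, feeding the
 * return value of the lock call back in as @resched.
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *
 *	// ... suspend/reset work, no delayed destruction runs here ...
 *
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */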
434
435 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
436                                      struct ttm_resource **mem,
437                                      struct ttm_operation_ctx *ctx,
438                                      struct ttm_place *hop)
439 {
440         struct ttm_placement hop_placement;
441         struct ttm_resource *hop_mem;
442         int ret;
443
444         hop_placement.num_placement = hop_placement.num_busy_placement = 1;
445         hop_placement.placement = hop_placement.busy_placement = hop;
446
447         /* find space in the bounce domain */
448         ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
449         if (ret)
450                 return ret;
451         /* move to the bounce domain */
452         ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
453         if (ret) {
454                 ttm_resource_free(bo, &hop_mem);
455                 return ret;
456         }
457         return 0;
458 }
459
460 static int ttm_bo_evict(struct ttm_buffer_object *bo,
461                         struct ttm_operation_ctx *ctx)
462 {
463         struct ttm_device *bdev = bo->bdev;
464         struct ttm_resource *evict_mem;
465         struct ttm_placement placement;
466         struct ttm_place hop;
467         int ret = 0;
468
469         memset(&hop, 0, sizeof(hop));
470
471         dma_resv_assert_held(bo->base.resv);
472
473         placement.num_placement = 0;
474         placement.num_busy_placement = 0;
475         bdev->funcs->evict_flags(bo, &placement);
476
477         if (!placement.num_placement && !placement.num_busy_placement) {
478                 ret = ttm_bo_wait(bo, true, false);
479                 if (ret)
480                         return ret;
481
482                 /*
483                  * Since we've already synced, this frees backing store
484                  * immediately.
485                  */
486                 return ttm_bo_pipeline_gutting(bo);
487         }
488
489         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
490         if (ret) {
491                 if (ret != -ERESTARTSYS) {
492                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
493                                bo);
494                         ttm_bo_mem_space_debug(bo, &placement);
495                 }
496                 goto out;
497         }
498
499 bounce:
500         ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
501         if (ret == -EMULTIHOP) {
502                 ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
503                 if (ret) {
504                         pr_err("Buffer eviction failed\n");
505                         ttm_resource_free(bo, &evict_mem);
506                         goto out;
507                 }
508                 /* try and move to final place now. */
509                 goto bounce;
510         }
511 out:
512         return ret;
513 }
514
515 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
516                               const struct ttm_place *place)
517 {
518         struct ttm_resource *res = bo->resource;
519         struct ttm_device *bdev = bo->bdev;
520
521         dma_resv_assert_held(bo->base.resv);
522         if (bo->resource->mem_type == TTM_PL_SYSTEM)
523                 return true;
524
525         /* Don't evict this BO if it's outside of the
526          * requested placement range
527          */
528         return ttm_resource_intersects(bdev, res, place, bo->base.size);
529 }
530 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
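
/*
 * Illustrative sketch only: a driver usually points its eviction_valuable
 * hook at this default or layers extra checks on top of it. The function and
 * helper below are hypothetical.
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_needed_soon(bo))	// driver-specific check
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */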
531
532 /*
533  * Check whether the target bo may be evicted or swapped out, covering two cases:
534  *
535  * a. If it shares the same reservation object as ctx->resv, assume that
536  * reservation object is already locked, so don't lock it again; return
537  * true directly when either ctx->allow_res_evict is set
538  * or the target bo is already on the delayed free list;
539  *
540  * b. Otherwise, trylock it.
541  */
542 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
543                                            struct ttm_operation_ctx *ctx,
544                                            const struct ttm_place *place,
545                                            bool *locked, bool *busy)
546 {
547         bool ret = false;
548
549         if (bo->base.resv == ctx->resv) {
550                 dma_resv_assert_held(bo->base.resv);
551                 if (ctx->allow_res_evict)
552                         ret = true;
553                 *locked = false;
554                 if (busy)
555                         *busy = false;
556         } else {
557                 ret = dma_resv_trylock(bo->base.resv);
558                 *locked = ret;
559                 if (busy)
560                         *busy = !ret;
561         }
562
563         if (ret && place && (bo->resource->mem_type != place->mem_type ||
564                 !bo->bdev->funcs->eviction_valuable(bo, place))) {
565                 ret = false;
566                 if (*locked) {
567                         dma_resv_unlock(bo->base.resv);
568                         *locked = false;
569                 }
570         }
571
572         return ret;
573 }
574
575 /**
576  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
577  *
578  * @busy_bo: BO which couldn't be locked with trylock
579  * @ctx: operation context
580  * @ticket: acquire ticket
581  *
582  * Try to lock a busy buffer object to avoid failing eviction.
583  */
584 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
585                                    struct ttm_operation_ctx *ctx,
586                                    struct ww_acquire_ctx *ticket)
587 {
588         int r;
589
590         if (!busy_bo || !ticket)
591                 return -EBUSY;
592
593         if (ctx->interruptible)
594                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
595                                                           ticket);
596         else
597                 r = dma_resv_lock(busy_bo->base.resv, ticket);
598
599         /*
600          * TODO: It would be better to keep the BO locked until allocation is at
601          * least tried one more time, but that would mean a much larger rework
602          * of TTM.
603          */
604         if (!r)
605                 dma_resv_unlock(busy_bo->base.resv);
606
607         return r == -EDEADLK ? -EBUSY : r;
608 }
609
610 int ttm_mem_evict_first(struct ttm_device *bdev,
611                         struct ttm_resource_manager *man,
612                         const struct ttm_place *place,
613                         struct ttm_operation_ctx *ctx,
614                         struct ww_acquire_ctx *ticket)
615 {
616         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
617         struct ttm_resource_cursor cursor;
618         struct ttm_resource *res;
619         bool locked = false;
620         int ret;
621
622         spin_lock(&bdev->lru_lock);
623         ttm_resource_manager_for_each_res(man, &cursor, res) {
624                 bool busy;
625
626                 if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
627                                                     &locked, &busy)) {
628                         if (busy && !busy_bo && ticket !=
629                             dma_resv_locking_ctx(res->bo->base.resv))
630                                 busy_bo = res->bo;
631                         continue;
632                 }
633
634                 if (ttm_bo_get_unless_zero(res->bo)) {
635                         bo = res->bo;
636                         break;
637                 }
638                 if (locked)
639                         dma_resv_unlock(res->bo->base.resv);
640         }
641
642         if (!bo) {
643                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
644                         busy_bo = NULL;
645                 spin_unlock(&bdev->lru_lock);
646                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
647                 if (busy_bo)
648                         ttm_bo_put(busy_bo);
649                 return ret;
650         }
651
652         if (bo->deleted) {
653                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
654                                           ctx->no_wait_gpu, locked);
655                 ttm_bo_put(bo);
656                 return ret;
657         }
658
659         spin_unlock(&bdev->lru_lock);
660
661         ret = ttm_bo_evict(bo, ctx);
662         if (locked)
663                 ttm_bo_unreserve(bo);
664         else
665                 ttm_bo_move_to_lru_tail_unlocked(bo);
666
667         ttm_bo_put(bo);
668         return ret;
669 }
670
671 /**
672  * ttm_bo_pin - Pin the buffer object.
673  * @bo: The buffer object to pin
674  *
675  * Make sure the buffer is not evicted any more during memory pressure.
676  * @bo must be unpinned again by calling ttm_bo_unpin().
677  */
678 void ttm_bo_pin(struct ttm_buffer_object *bo)
679 {
680         dma_resv_assert_held(bo->base.resv);
681         WARN_ON_ONCE(!kref_read(&bo->kref));
682         spin_lock(&bo->bdev->lru_lock);
683         if (bo->resource)
684                 ttm_resource_del_bulk_move(bo->resource, bo);
685         ++bo->pin_count;
686         spin_unlock(&bo->bdev->lru_lock);
687 }
688 EXPORT_SYMBOL(ttm_bo_pin);
689
690 /**
691  * ttm_bo_unpin - Unpin the buffer object.
692  * @bo: The buffer object to unpin
693  *
694  * Allows the buffer object to be evicted again during memory pressure.
695  */
696 void ttm_bo_unpin(struct ttm_buffer_object *bo)
697 {
698         dma_resv_assert_held(bo->base.resv);
699         WARN_ON_ONCE(!kref_read(&bo->kref));
700         if (WARN_ON_ONCE(!bo->pin_count))
701                 return;
702
703         spin_lock(&bo->bdev->lru_lock);
704         --bo->pin_count;
705         if (bo->resource)
706                 ttm_resource_add_bulk_move(bo->resource, bo);
707         spin_unlock(&bo->bdev->lru_lock);
708 }
709 EXPORT_SYMBOL(ttm_bo_unpin);
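
/*
 * Illustrative sketch only: pinning is done under the reservation lock and
 * must be balanced with an unpin once the buffer may be evicted again, for
 * example around a scanout or firmware access window.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	// ... the BO will not be evicted here ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */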
710
711 /*
712  * Add the last move fence to the BO as a kernel dependency and reserve a new
713  * fence slot.
714  */
715 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
716                                  struct ttm_resource_manager *man,
717                                  struct ttm_resource *mem,
718                                  bool no_wait_gpu)
719 {
720         struct dma_fence *fence;
721         int ret;
722
723         spin_lock(&man->move_lock);
724         fence = dma_fence_get(man->move);
725         spin_unlock(&man->move_lock);
726
727         if (!fence)
728                 return 0;
729
730         if (no_wait_gpu) {
731                 ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
732                 dma_fence_put(fence);
733                 return ret;
734         }
735
736         dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
737
738         ret = dma_resv_reserve_fences(bo->base.resv, 1);
739         dma_fence_put(fence);
740         return ret;
741 }
742
743 /*
744  * Repeatedly evict memory from the LRU for the requested place until we
745  * create enough space, or we've evicted everything and there isn't enough space.
746  */
747 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
748                                   const struct ttm_place *place,
749                                   struct ttm_resource **mem,
750                                   struct ttm_operation_ctx *ctx)
751 {
752         struct ttm_device *bdev = bo->bdev;
753         struct ttm_resource_manager *man;
754         struct ww_acquire_ctx *ticket;
755         int ret;
756
757         man = ttm_manager_type(bdev, place->mem_type);
758         ticket = dma_resv_locking_ctx(bo->base.resv);
759         do {
760                 ret = ttm_resource_alloc(bo, place, mem);
761                 if (likely(!ret))
762                         break;
763                 if (unlikely(ret != -ENOSPC))
764                         return ret;
765                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
766                                           ticket);
767                 if (unlikely(ret != 0))
768                         return ret;
769         } while (1);
770
771         return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
772 }
773
774 /*
775  * Creates space for memory region @mem according to its type.
776  *
777  * This function first searches for free space in compatible memory types in
778  * the priority order defined by the driver.  If free space isn't found, then
779  * ttm_bo_mem_force_space is attempted in priority order to evict and find
780  * space.
781  */
782 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
783                         struct ttm_placement *placement,
784                         struct ttm_resource **mem,
785                         struct ttm_operation_ctx *ctx)
786 {
787         struct ttm_device *bdev = bo->bdev;
788         bool type_found = false;
789         int i, ret;
790
791         ret = dma_resv_reserve_fences(bo->base.resv, 1);
792         if (unlikely(ret))
793                 return ret;
794
795         for (i = 0; i < placement->num_placement; ++i) {
796                 const struct ttm_place *place = &placement->placement[i];
797                 struct ttm_resource_manager *man;
798
799                 man = ttm_manager_type(bdev, place->mem_type);
800                 if (!man || !ttm_resource_manager_used(man))
801                         continue;
802
803                 type_found = true;
804                 ret = ttm_resource_alloc(bo, place, mem);
805                 if (ret == -ENOSPC)
806                         continue;
807                 if (unlikely(ret))
808                         goto error;
809
810                 ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
811                 if (unlikely(ret)) {
812                         ttm_resource_free(bo, mem);
813                         if (ret == -EBUSY)
814                                 continue;
815
816                         goto error;
817                 }
818                 return 0;
819         }
820
821         for (i = 0; i < placement->num_busy_placement; ++i) {
822                 const struct ttm_place *place = &placement->busy_placement[i];
823                 struct ttm_resource_manager *man;
824
825                 man = ttm_manager_type(bdev, place->mem_type);
826                 if (!man || !ttm_resource_manager_used(man))
827                         continue;
828
829                 type_found = true;
830                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
831                 if (likely(!ret))
832                         return 0;
833
834                 if (ret && ret != -EBUSY)
835                         goto error;
836         }
837
838         ret = -ENOMEM;
839         if (!type_found) {
840                 pr_err(TTM_PFX "No compatible memory type found\n");
841                 ret = -EINVAL;
842         }
843
844 error:
845         return ret;
846 }
847 EXPORT_SYMBOL(ttm_bo_mem_space);
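
/*
 * Illustrative sketch only (BO already reserved): the placement array is
 * tried first without eviction, then the busy_placement array is tried with
 * eviction. The memory types below are just examples.
 *
 *	struct ttm_place vram = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_place gtt = { .mem_type = TTM_PL_TT };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &vram,
 *		.num_busy_placement = 1,
 *		.busy_placement = &gtt,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_resource *mem;
 *	int ret;
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);
 */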
848
849 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
850                               struct ttm_placement *placement,
851                               struct ttm_operation_ctx *ctx)
852 {
853         struct ttm_resource *mem;
854         struct ttm_place hop;
855         int ret;
856
857         dma_resv_assert_held(bo->base.resv);
858
859         /*
860          * Determine where to move the buffer.
861          *
862          * If the driver determines that the move needs
863          * an extra step, it will return -EMULTIHOP;
864          * the buffer will then be moved to the temporary
865          * stop and the driver will be called again to make
866          * the second hop.
867          */
868         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
869         if (ret)
870                 return ret;
871 bounce:
872         ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
873         if (ret == -EMULTIHOP) {
874                 ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
875                 if (ret)
876                         goto out;
877                 /* try and move to final place now. */
878                 goto bounce;
879         }
880 out:
881         if (ret)
882                 ttm_resource_free(bo, &mem);
883         return ret;
884 }
885
886 int ttm_bo_validate(struct ttm_buffer_object *bo,
887                     struct ttm_placement *placement,
888                     struct ttm_operation_ctx *ctx)
889 {
890         int ret;
891
892         dma_resv_assert_held(bo->base.resv);
893
894         /*
895          * Remove the backing store if no placement is given.
896          */
897         if (!placement->num_placement && !placement->num_busy_placement)
898                 return ttm_bo_pipeline_gutting(bo);
899
900         /*
901          * Check whether we need to move buffer.
902          */
903         if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
904                 ret = ttm_bo_move_buffer(bo, placement, ctx);
905                 if (ret)
906                         return ret;
907         }
908         /*
909          * We might need to add a TTM.
910          */
911         if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
912                 ret = ttm_tt_create(bo, true);
913                 if (ret)
914                         return ret;
915         }
916         return 0;
917 }
918 EXPORT_SYMBOL(ttm_bo_validate);
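
/*
 * Illustrative sketch only: moving an already initialized BO into a new
 * placement. The reservation lock must be held across the call; "placement"
 * is assumed to be set up as in the ttm_bo_mem_space() example above.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */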
919
920 /**
921  * ttm_bo_init_reserved
922  *
923  * @bdev: Pointer to a ttm_device struct.
924  * @bo: Pointer to a ttm_buffer_object to be initialized.
925  * @type: Requested type of buffer object.
926  * @placement: Initial placement for buffer object.
927  * @alignment: Data alignment in pages.
928  * @ctx: TTM operation context for memory allocation.
929  * @sg: Scatter-gather table.
930  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
931  * @destroy: Destroy function. Use NULL for kfree().
932  *
933  * This function initializes a pre-allocated struct ttm_buffer_object.
934  * As this object may be part of a larger structure, this function,
935  * together with the @destroy function, enables driver-specific objects
936  * derived from a ttm_buffer_object.
937  *
938  * On successful return, the caller owns an object kref to @bo. The kref is
939  * usually set to 1, but note that in some situations, other
940  * tasks may already be holding references to @bo as well.
941  * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
942  * and it is the caller's responsibility to call ttm_bo_unreserve.
943  *
944  * If a failure occurs, the function will call the @destroy function. Thus,
945  * after a failure, dereferencing @bo is illegal and will likely cause memory
946  * corruption.
947  *
948  * Returns
949  * -ENOMEM: Out of memory.
950  * -EINVAL: Invalid placement flags.
951  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
952  */
953 int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
954                          enum ttm_bo_type type, struct ttm_placement *placement,
955                          uint32_t alignment, struct ttm_operation_ctx *ctx,
956                          struct sg_table *sg, struct dma_resv *resv,
957                          void (*destroy) (struct ttm_buffer_object *))
958 {
959         static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
960         int ret;
961
962         kref_init(&bo->kref);
963         INIT_LIST_HEAD(&bo->ddestroy);
964         bo->bdev = bdev;
965         bo->type = type;
966         bo->page_alignment = alignment;
967         bo->destroy = destroy;
968         bo->pin_count = 0;
969         bo->sg = sg;
970         bo->bulk_move = NULL;
971         if (resv)
972                 bo->base.resv = resv;
973         else
974                 bo->base.resv = &bo->base._resv;
975         atomic_inc(&ttm_glob.bo_count);
976
977         ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
978         if (unlikely(ret)) {
979                 ttm_bo_put(bo);
980                 return ret;
981         }
982
983         /*
984          * For ttm_bo_type_device buffers, allocate
985          * address space from the device.
986          */
987         if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
988                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
989                                          PFN_UP(bo->base.size));
990                 if (ret)
991                         goto err_put;
992         }
993
994         /* passed reservation objects should already be locked,
995          * since otherwise lockdep will be angered in radeon.
996          */
997         if (!resv)
998                 WARN_ON(!dma_resv_trylock(bo->base.resv));
999         else
1000                 dma_resv_assert_held(resv);
1001
1002         ret = ttm_bo_validate(bo, placement, ctx);
1003         if (unlikely(ret))
1004                 goto err_unlock;
1005
1006         return 0;
1007
1008 err_unlock:
1009         if (!resv)
1010                 dma_resv_unlock(bo->base.resv);
1011
1012 err_put:
1013         ttm_bo_put(bo);
1014         return ret;
1015 }
1016 EXPORT_SYMBOL(ttm_bo_init_reserved);
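
/*
 * Illustrative sketch only: initializing a BO embedded in a hypothetical
 * driver object ("my_bo" with a ttm_buffer_object member "tbo"; "placement"
 * and "my_bo_destroy" are likewise driver-side assumptions). The embedded
 * GEM object must already be initialized so bo->base.size is valid. With
 * resv == NULL the BO comes back reserved and has to be unreserved by the
 * caller; on failure @destroy has already run, so the object must not be
 * touched again.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false };
 *	int ret;
 *
 *	ret = ttm_bo_init_reserved(bdev, &my_bo->tbo, ttm_bo_type_kernel,
 *				   &placement, 0, &ctx, NULL, NULL,
 *				   my_bo_destroy);
 *	if (ret)
 *		return ret;
 *	ttm_bo_unreserve(&my_bo->tbo);
 */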
1017
1018 /**
1019  * ttm_bo_init_validate
1020  *
1021  * @bdev: Pointer to a ttm_device struct.
1022  * @bo: Pointer to a ttm_buffer_object to be initialized.
1023  * @type: Requested type of buffer object.
1024  * @placement: Initial placement for buffer object.
1025  * @alignment: Data alignment in pages.
1026  * @interruptible: If needing to sleep to wait for GPU resources,
1027  * sleep interruptible.
1032  * @sg: Scatter-gather table.
1033  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
1034  * @destroy: Destroy function. Use NULL for kfree().
1035  *
1036  * This function initializes a pre-allocated struct ttm_buffer_object.
1037  * As this object may be part of a larger structure, this function,
1038  * together with the @destroy function,
1039  * enables driver-specific objects derived from a ttm_buffer_object.
1040  *
1041  * On successful return, the caller owns an object kref to @bo. The kref is
1042  * usually set to 1, but note that in some situations, other
1043  * tasks may already be holding references to @bo as well.
1044  *
1045  * If a failure occurs, the function will call the @destroy function. Thus,
1046  * after a failure, dereferencing @bo is illegal and will likely cause memory
1047  * corruption.
1048  *
1049  * Returns
1050  * -ENOMEM: Out of memory.
1051  * -EINVAL: Invalid placement flags.
1052  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
1053  */
1054 int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
1055                          enum ttm_bo_type type, struct ttm_placement *placement,
1056                          uint32_t alignment, bool interruptible,
1057                          struct sg_table *sg, struct dma_resv *resv,
1058                          void (*destroy) (struct ttm_buffer_object *))
1059 {
1060         struct ttm_operation_ctx ctx = { interruptible, false };
1061         int ret;
1062
1063         ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
1064                                    sg, resv, destroy);
1065         if (ret)
1066                 return ret;
1067
1068         if (!resv)
1069                 ttm_bo_unreserve(bo);
1070
1071         return 0;
1072 }
1073 EXPORT_SYMBOL(ttm_bo_init_validate);
1074
1075 /*
1076  * buffer object vm functions.
1077  */
1078
1079 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1080 {
1081         struct ttm_device *bdev = bo->bdev;
1082
1083         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1084         ttm_mem_io_free(bdev, bo->resource);
1085 }
1086 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1087
1088 int ttm_bo_wait(struct ttm_buffer_object *bo,
1089                 bool interruptible, bool no_wait)
1090 {
1091         long timeout = 15 * HZ;
1092
1093         if (no_wait) {
1094                 if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
1095                         return 0;
1096                 else
1097                         return -EBUSY;
1098         }
1099
1100         timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
1101                                         interruptible, timeout);
1102         if (timeout < 0)
1103                 return timeout;
1104
1105         if (timeout == 0)
1106                 return -EBUSY;
1107
1108         return 0;
1109 }
1110 EXPORT_SYMBOL(ttm_bo_wait);
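
/*
 * Illustrative sketch only: wait interruptibly for all activity on a
 * reserved BO to finish, e.g. before CPU access.
 *
 *	ret = ttm_bo_wait(bo, true, false);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS or -EBUSY
 */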
1111
1112 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
1113                    gfp_t gfp_flags)
1114 {
1115         struct ttm_place place;
1116         bool locked;
1117         int ret;
1118
1119         /*
1120          * While the bo may already reside in SYSTEM placement, set
1121          * SYSTEM as new placement to cover also the move further below.
1122          * The driver may use the fact that we're moving from SYSTEM
1123          * as an indication that we're about to swap out.
1124          */
1125         memset(&place, 0, sizeof(place));
1126         place.mem_type = bo->resource->mem_type;
1127         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
1128                 return -EBUSY;
1129
1130         if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
1131             bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
1132             bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
1133             !ttm_bo_get_unless_zero(bo)) {
1134                 if (locked)
1135                         dma_resv_unlock(bo->base.resv);
1136                 return -EBUSY;
1137         }
1138
1139         if (bo->deleted) {
1140                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1141                 ttm_bo_put(bo);
1142                 return ret == -EBUSY ? -ENOSPC : ret;
1143         }
1144
1145         /* TODO: Cleanup the locking */
1146         spin_unlock(&bo->bdev->lru_lock);
1147
1148         /*
1149          * Move to system cached
1150          */
1151         if (bo->resource->mem_type != TTM_PL_SYSTEM) {
1152                 struct ttm_operation_ctx ctx = { false, false };
1153                 struct ttm_resource *evict_mem;
1154                 struct ttm_place hop;
1155
1156                 memset(&hop, 0, sizeof(hop));
1157                 place.mem_type = TTM_PL_SYSTEM;
1158                 ret = ttm_resource_alloc(bo, &place, &evict_mem);
1159                 if (unlikely(ret))
1160                         goto out;
1161
1162                 ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
1163                 if (unlikely(ret != 0)) {
1164                         WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1165                         goto out;
1166                 }
1167         }
1168
1169         /*
1170          * Make sure BO is idle.
1171          */
1172         ret = ttm_bo_wait(bo, false, false);
1173         if (unlikely(ret != 0))
1174                 goto out;
1175
1176         ttm_bo_unmap_virtual(bo);
1177
1178         /*
1179          * Swap out. Buffer will be swapped in again as soon as
1180          * anyone tries to access a ttm page.
1181          */
1182         if (bo->bdev->funcs->swap_notify)
1183                 bo->bdev->funcs->swap_notify(bo);
1184
1185         if (ttm_tt_is_populated(bo->ttm))
1186                 ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
1187 out:
1188
1189         /*
1190          * Unreserve without putting on LRU to avoid swapping out an
1191          * already swapped buffer.
1192          */
1193         if (locked)
1194                 dma_resv_unlock(bo->base.resv);
1195         ttm_bo_put(bo);
1196         return ret == -EBUSY ? -ENOSPC : ret;
1197 }
1198
1199 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1200 {
1201         if (bo->ttm == NULL)
1202                 return;
1203
1204         ttm_tt_unpopulate(bo->bdev, bo->ttm);
1205         ttm_tt_destroy(bo->bdev, bo->ttm);
1206         bo->ttm = NULL;
1207 }