1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/dma-resv.h>
44
45 #include "ttm_module.h"
46
47 static void ttm_bo_global_kobj_release(struct kobject *kobj);
48
49 /*
50  * ttm_global_mutex - protecting the global BO state
51  */
52 DEFINE_MUTEX(ttm_global_mutex);
53 unsigned ttm_bo_glob_use_count;
54 struct ttm_bo_global ttm_bo_glob;
55 EXPORT_SYMBOL(ttm_bo_glob);
56
57 static struct attribute ttm_bo_count = {
58         .name = "bo_count",
59         .mode = S_IRUGO
60 };
61
62 /* default destructor */
63 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
64 {
65         kfree(bo);
66 }
67
68 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
69                                         struct ttm_placement *placement)
70 {
71         struct drm_printer p = drm_debug_printer(TTM_PFX);
72         struct ttm_resource_manager *man;
73         int i, mem_type;
74
75         drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
76                    bo, bo->mem.num_pages, bo->base.size >> 10,
77                    bo->base.size >> 20);
78         for (i = 0; i < placement->num_placement; i++) {
79                 mem_type = placement->placement[i].mem_type;
80                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
81                            i, placement->placement[i].flags, mem_type);
82                 man = ttm_manager_type(bo->bdev, mem_type);
83                 ttm_resource_manager_debug(man, &p);
84         }
85 }
86
87 static ssize_t ttm_bo_global_show(struct kobject *kobj,
88                                   struct attribute *attr,
89                                   char *buffer)
90 {
91         struct ttm_bo_global *glob =
92                 container_of(kobj, struct ttm_bo_global, kobj);
93
94         return snprintf(buffer, PAGE_SIZE, "%d\n",
95                                 atomic_read(&glob->bo_count));
96 }
97
98 static struct attribute *ttm_bo_global_attrs[] = {
99         &ttm_bo_count,
100         NULL
101 };
102
103 static const struct sysfs_ops ttm_bo_global_ops = {
104         .show = &ttm_bo_global_show
105 };
106
107 static struct kobj_type ttm_bo_glob_kobj_type  = {
108         .release = &ttm_bo_global_kobj_release,
109         .sysfs_ops = &ttm_bo_global_ops,
110         .default_attrs = ttm_bo_global_attrs
111 };
112
113 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
114 {
115         struct ttm_bo_device *bdev = bo->bdev;
116
117         list_del_init(&bo->swap);
118         list_del_init(&bo->lru);
119
120         if (bdev->driver->del_from_lru_notify)
121                 bdev->driver->del_from_lru_notify(bo);
122 }
123
124 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
125                                      struct ttm_buffer_object *bo)
126 {
127         if (!pos->first)
128                 pos->first = bo;
129         pos->last = bo;
130 }
131
132 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
133                              struct ttm_resource *mem,
134                              struct ttm_lru_bulk_move *bulk)
135 {
136         struct ttm_bo_device *bdev = bo->bdev;
137         struct ttm_resource_manager *man;
138
139         dma_resv_assert_held(bo->base.resv);
140
141         if (bo->pin_count)
142                 return;
143
144         man = ttm_manager_type(bdev, mem->mem_type);
145         list_move_tail(&bo->lru, &man->lru[bo->priority]);
146         if (man->use_tt && bo->ttm &&
147             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
148                                      TTM_PAGE_FLAG_SWAPPED))) {
149                 struct list_head *swap;
150
151                 swap = &ttm_bo_glob.swap_lru[bo->priority];
152                 list_move_tail(&bo->swap, swap);
153         }
154
155         if (bdev->driver->del_from_lru_notify)
156                 bdev->driver->del_from_lru_notify(bo);
157
158         if (bulk && !bo->pin_count) {
159                 switch (bo->mem.mem_type) {
160                 case TTM_PL_TT:
161                         ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
162                         break;
163
164                 case TTM_PL_VRAM:
165                         ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
166                         break;
167                 }
168                 if (bo->ttm && !(bo->ttm->page_flags &
169                                  (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
170                         ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
171         }
172 }
173 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
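/*
 * Usage sketch (not part of this file): callers must hold both the BO's
 * reservation and the global LRU lock before bumping it, e.g.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	spin_lock(&ttm_bo_glob.lru_lock);
 *	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 *	spin_unlock(&ttm_bo_glob.lru_lock);
 *	dma_resv_unlock(bo->base.resv);
 *
 * The ttm_bo_move_to_lru_tail_unlocked() helper used elsewhere in this file
 * wraps the lru_lock part of this pattern for the common NULL-bulk case.
 */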
174
175 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
176 {
177         unsigned i;
178
179         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
180                 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
181                 struct ttm_resource_manager *man;
182
183                 if (!pos->first)
184                         continue;
185
186                 dma_resv_assert_held(pos->first->base.resv);
187                 dma_resv_assert_held(pos->last->base.resv);
188
189                 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
190                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
191                                     &pos->last->lru);
192         }
193
194         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
195                 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
196                 struct ttm_resource_manager *man;
197
198                 if (!pos->first)
199                         continue;
200
201                 dma_resv_assert_held(pos->first->base.resv);
202                 dma_resv_assert_held(pos->last->base.resv);
203
204                 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
205                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
206                                     &pos->last->lru);
207         }
208
209         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
210                 struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
211                 struct list_head *lru;
212
213                 if (!pos->first)
214                         continue;
215
216                 dma_resv_assert_held(pos->first->base.resv);
217                 dma_resv_assert_held(pos->last->base.resv);
218
219                 lru = &ttm_bo_glob.swap_lru[i];
220                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
221         }
222 }
223 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
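/*
 * Illustrative sketch of the bulk LRU pattern as a driver might use it (the
 * list and its node name below are hypothetical, not TTM API): record the
 * first/last position of each range while moving the individual BOs, then
 * splice the whole ranges to the LRU tails in one go.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&ttm_bo_glob.lru_lock);
 *	list_for_each_entry(bo, &my_vm_bos, vm_node)
 *		ttm_bo_move_to_lru_tail(bo, &bo->mem, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&ttm_bo_glob.lru_lock);
 *
 * Every BO recorded in the bulk structure must keep its reservation locked
 * until after ttm_bo_bulk_move_lru_tail(), which is what the
 * dma_resv_assert_held() checks on the first and last entries enforce.
 */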
224
225 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
226                                   struct ttm_resource *mem, bool evict,
227                                   struct ttm_operation_ctx *ctx,
228                                   struct ttm_place *hop)
229 {
230         struct ttm_bo_device *bdev = bo->bdev;
231         struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
232         struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
233         int ret;
234
235         ttm_bo_unmap_virtual(bo);
236
237         /*
238          * Create and bind a ttm if required.
239          */
240
241         if (new_man->use_tt) {
242                 /* Zero init the new TTM structure if the old location should
243                  * have used one as well.
244                  */
245                 ret = ttm_tt_create(bo, old_man->use_tt);
246                 if (ret)
247                         goto out_err;
248
249                 if (mem->mem_type != TTM_PL_SYSTEM) {
250                         ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
251                         if (ret)
252                                 goto out_err;
253                 }
254         }
255
256         ret = bdev->driver->move(bo, evict, ctx, mem, hop);
257         if (ret) {
258                 if (ret == -EMULTIHOP)
259                         return ret;
260                 goto out_err;
261         }
262
263         ctx->bytes_moved += bo->base.size;
264         return 0;
265
266 out_err:
267         new_man = ttm_manager_type(bdev, bo->mem.mem_type);
268         if (!new_man->use_tt)
269                 ttm_bo_tt_destroy(bo);
270
271         return ret;
272 }
273
274 /*
275  * Must be called with bo::resv reserved (locked).
276  * Will release GPU memory type usage on destruction.
277  * This is the place to put in driver specific hooks to release
278  * driver private resources.
279  * The caller is responsible for releasing the bo::resv lock afterwards.
280  */
281
282 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
283 {
284         if (bo->bdev->driver->delete_mem_notify)
285                 bo->bdev->driver->delete_mem_notify(bo);
286
287         ttm_bo_tt_destroy(bo);
288         ttm_resource_free(bo, &bo->mem);
289 }
290
291 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
292 {
293         int r;
294
295         if (bo->base.resv == &bo->base._resv)
296                 return 0;
297
298         BUG_ON(!dma_resv_trylock(&bo->base._resv));
299
300         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
301         dma_resv_unlock(&bo->base._resv);
302         if (r)
303                 return r;
304
305         if (bo->type != ttm_bo_type_sg) {
306                 /* This works because the BO is about to be destroyed and nobody
307                  * references it anymore. The only tricky case is the trylock on
308                  * the resv object while holding the lru_lock.
309                  */
310                 spin_lock(&ttm_bo_glob.lru_lock);
311                 bo->base.resv = &bo->base._resv;
312                 spin_unlock(&ttm_bo_glob.lru_lock);
313         }
314
315         return r;
316 }
317
318 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
319 {
320         struct dma_resv *resv = &bo->base._resv;
321         struct dma_resv_list *fobj;
322         struct dma_fence *fence;
323         int i;
324
325         rcu_read_lock();
326         fobj = rcu_dereference(resv->fence);
327         fence = rcu_dereference(resv->fence_excl);
328         if (fence && !fence->ops->signaled)
329                 dma_fence_enable_sw_signaling(fence);
330
331         for (i = 0; fobj && i < fobj->shared_count; ++i) {
332                 fence = rcu_dereference(fobj->shared[i]);
333
334                 if (!fence->ops->signaled)
335                         dma_fence_enable_sw_signaling(fence);
336         }
337         rcu_read_unlock();
338 }
339
340 /**
341  * ttm_bo_cleanup_refs
342  * If the bo is idle, remove it from the lru lists and unref it.
343  * If it is not idle, block if possible.
344  *
345  * Must be called with the lru_lock and the reservation held; this function
346  * will drop the lru lock and optionally the reservation lock before returning.
347  *
348  * @bo:                    The buffer object to clean up
349  * @interruptible:         Any sleeps should occur interruptibly.
350  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
351  * @unlock_resv:           Unlock the reservation lock as well.
352  */
353
354 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
355                                bool interruptible, bool no_wait_gpu,
356                                bool unlock_resv)
357 {
358         struct dma_resv *resv = &bo->base._resv;
359         int ret;
360
361         if (dma_resv_test_signaled_rcu(resv, true))
362                 ret = 0;
363         else
364                 ret = -EBUSY;
365
366         if (ret && !no_wait_gpu) {
367                 long lret;
368
369                 if (unlock_resv)
370                         dma_resv_unlock(bo->base.resv);
371                 spin_unlock(&ttm_bo_glob.lru_lock);
372
373                 lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
374                                                  30 * HZ);
375
376                 if (lret < 0)
377                         return lret;
378                 else if (lret == 0)
379                         return -EBUSY;
380
381                 spin_lock(&ttm_bo_glob.lru_lock);
382                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
383                         /*
384                          * We raced and lost: someone else holds the reservation
385                          * now and is probably busy in ttm_bo_cleanup_memtype_use.
386                          *
387                          * Even if that's not the case, any delayed destruction
388                          * would succeed now that we finished waiting, so just
389                          * return success here.
390                          */
391                         spin_unlock(&ttm_bo_glob.lru_lock);
392                         return 0;
393                 }
394                 ret = 0;
395         }
396
397         if (ret || unlikely(list_empty(&bo->ddestroy))) {
398                 if (unlock_resv)
399                         dma_resv_unlock(bo->base.resv);
400                 spin_unlock(&ttm_bo_glob.lru_lock);
401                 return ret;
402         }
403
404         ttm_bo_del_from_lru(bo);
405         list_del_init(&bo->ddestroy);
406         spin_unlock(&ttm_bo_glob.lru_lock);
407         ttm_bo_cleanup_memtype_use(bo);
408
409         if (unlock_resv)
410                 dma_resv_unlock(bo->base.resv);
411
412         ttm_bo_put(bo);
413
414         return 0;
415 }
416
417 /*
418  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
419  * encountered buffers.
420  */
421 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
422 {
423         struct ttm_bo_global *glob = &ttm_bo_glob;
424         struct list_head removed;
425         bool empty;
426
427         INIT_LIST_HEAD(&removed);
428
429         spin_lock(&glob->lru_lock);
430         while (!list_empty(&bdev->ddestroy)) {
431                 struct ttm_buffer_object *bo;
432
433                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
434                                       ddestroy);
435                 list_move_tail(&bo->ddestroy, &removed);
436                 if (!ttm_bo_get_unless_zero(bo))
437                         continue;
438
439                 if (remove_all || bo->base.resv != &bo->base._resv) {
440                         spin_unlock(&glob->lru_lock);
441                         dma_resv_lock(bo->base.resv, NULL);
442
443                         spin_lock(&glob->lru_lock);
444                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
445
446                 } else if (dma_resv_trylock(bo->base.resv)) {
447                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
448                 } else {
449                         spin_unlock(&glob->lru_lock);
450                 }
451
452                 ttm_bo_put(bo);
453                 spin_lock(&glob->lru_lock);
454         }
455         list_splice_tail(&removed, &bdev->ddestroy);
456         empty = list_empty(&bdev->ddestroy);
457         spin_unlock(&glob->lru_lock);
458
459         return empty;
460 }
461
462 static void ttm_bo_delayed_workqueue(struct work_struct *work)
463 {
464         struct ttm_bo_device *bdev =
465             container_of(work, struct ttm_bo_device, wq.work);
466
467         if (!ttm_bo_delayed_delete(bdev, false))
468                 schedule_delayed_work(&bdev->wq,
469                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
470 }
471
472 static void ttm_bo_release(struct kref *kref)
473 {
474         struct ttm_buffer_object *bo =
475             container_of(kref, struct ttm_buffer_object, kref);
476         struct ttm_bo_device *bdev = bo->bdev;
477         size_t acc_size = bo->acc_size;
478         int ret;
479
480         if (!bo->deleted) {
481                 ret = ttm_bo_individualize_resv(bo);
482                 if (ret) {
483                         /* Last resort: if we fail to allocate memory for the
484                          * fences, block for the BO to become idle
485                          */
485                          */
486                         dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
487                                                   30 * HZ);
488                 }
489
490                 if (bo->bdev->driver->release_notify)
491                         bo->bdev->driver->release_notify(bo);
492
493                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
494                 ttm_mem_io_free(bdev, &bo->mem);
495         }
496
497         if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
498             !dma_resv_trylock(bo->base.resv)) {
499                 /* The BO is not idle, resurrect it for delayed destroy */
500                 ttm_bo_flush_all_fences(bo);
501                 bo->deleted = true;
502
503                 spin_lock(&ttm_bo_glob.lru_lock);
504
505                 /*
506                  * Make pinned bos immediately available to
507                  * shrinkers, now that they are queued for
508                  * destruction.
509                  */
510                 if (WARN_ON(bo->pin_count)) {
511                         bo->pin_count = 0;
512                         ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
513                 }
514
515                 kref_init(&bo->kref);
516                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
517                 spin_unlock(&ttm_bo_glob.lru_lock);
518
519                 schedule_delayed_work(&bdev->wq,
520                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
521                 return;
522         }
523
524         spin_lock(&ttm_bo_glob.lru_lock);
525         ttm_bo_del_from_lru(bo);
526         list_del(&bo->ddestroy);
527         spin_unlock(&ttm_bo_glob.lru_lock);
528
529         ttm_bo_cleanup_memtype_use(bo);
530         dma_resv_unlock(bo->base.resv);
531
532         atomic_dec(&ttm_bo_glob.bo_count);
533         dma_fence_put(bo->moving);
534         if (!ttm_bo_uses_embedded_gem_object(bo))
535                 dma_resv_fini(&bo->base._resv);
536         bo->destroy(bo);
537         ttm_mem_global_free(&ttm_mem_glob, acc_size);
538 }
539
540 void ttm_bo_put(struct ttm_buffer_object *bo)
541 {
542         kref_put(&bo->kref, ttm_bo_release);
543 }
544 EXPORT_SYMBOL(ttm_bo_put);
545
546 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
547 {
548         return cancel_delayed_work_sync(&bdev->wq);
549 }
550 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
551
552 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
553 {
554         if (resched)
555                 schedule_delayed_work(&bdev->wq,
556                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
557 }
558 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
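/*
 * Sketch of the usual pairing in a driver, e.g. around suspend or GPU reset:
 * stop the delayed-destroy worker, do the critical work and then reschedule
 * the worker only if it was pending before.
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *
 *	... no delayed BO destruction runs in here ...
 *
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */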
559
560 static int ttm_bo_evict(struct ttm_buffer_object *bo,
561                         struct ttm_operation_ctx *ctx)
562 {
563         struct ttm_bo_device *bdev = bo->bdev;
564         struct ttm_resource evict_mem;
565         struct ttm_placement placement;
566         struct ttm_place hop;
567         int ret = 0;
568
569         memset(&hop, 0, sizeof(hop));
570
571         dma_resv_assert_held(bo->base.resv);
572
573         placement.num_placement = 0;
574         placement.num_busy_placement = 0;
575         bdev->driver->evict_flags(bo, &placement);
576
577         if (!placement.num_placement && !placement.num_busy_placement) {
578                 ttm_bo_wait(bo, false, false);
579
580                 ttm_bo_cleanup_memtype_use(bo);
581                 return ttm_tt_create(bo, false);
582         }
583
584         evict_mem = bo->mem;
585         evict_mem.mm_node = NULL;
586         evict_mem.bus.offset = 0;
587         evict_mem.bus.addr = NULL;
588
589         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
590         if (ret) {
591                 if (ret != -ERESTARTSYS) {
592                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
593                                bo);
594                         ttm_bo_mem_space_debug(bo, &placement);
595                 }
596                 goto out;
597         }
598
599         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
600         if (unlikely(ret)) {
601                 WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
602                 if (ret != -ERESTARTSYS)
603                         pr_err("Buffer eviction failed\n");
604                 ttm_resource_free(bo, &evict_mem);
605         }
606 out:
607         return ret;
608 }
609
610 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
611                               const struct ttm_place *place)
612 {
613         /* Don't evict this BO if it's outside of the
614          * requested placement range
615          */
616         if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
617             (place->lpfn && place->lpfn <= bo->mem.start))
618                 return false;
619
620         return true;
621 }
622 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
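/*
 * Drivers typically use this as the fallback of their own eviction_valuable
 * callback and only add vetoes on top. Hypothetical sketch (the mydrv_*
 * helper is not a real function):
 *
 *	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
 *					    const struct ttm_place *place)
 *	{
 *		if (mydrv_bo_is_busy_for_hw(bo))
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */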
623
624 /*
625  * Check whether the target bo is allowed to be evicted or swapped out:
626  *
627  * a. if it shares the same reservation object as ctx->resv, the reservation
628  * object is assumed to be locked already, so don't lock it again and return
629  * true directly when either the operation allows reserved eviction
630  * (ctx->allow_res_evict) or the target bo is already on the delayed free list;
631  *
632  * b. Otherwise, trylock it.
633  */
634 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
635                         struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
636 {
637         bool ret = false;
638
639         if (bo->base.resv == ctx->resv) {
640                 dma_resv_assert_held(bo->base.resv);
641                 if (ctx->allow_res_evict)
642                         ret = true;
643                 *locked = false;
644                 if (busy)
645                         *busy = false;
646         } else {
647                 ret = dma_resv_trylock(bo->base.resv);
648                 *locked = ret;
649                 if (busy)
650                         *busy = !ret;
651         }
652
653         return ret;
654 }
655
656 /**
657  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
658  *
659  * @busy_bo: BO which couldn't be locked with trylock
660  * @ctx: operation context
661  * @ticket: acquire ticket
662  *
663  * Try to lock a busy buffer object to avoid failing eviction.
664  */
665 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
666                                    struct ttm_operation_ctx *ctx,
667                                    struct ww_acquire_ctx *ticket)
668 {
669         int r;
670
671         if (!busy_bo || !ticket)
672                 return -EBUSY;
673
674         if (ctx->interruptible)
675                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
676                                                           ticket);
677         else
678                 r = dma_resv_lock(busy_bo->base.resv, ticket);
679
680         /*
681          * TODO: It would be better to keep the BO locked until allocation is at
682          * least tried one more time, but that would mean a much larger rework
683          * of TTM.
684          */
685         if (!r)
686                 dma_resv_unlock(busy_bo->base.resv);
687
688         return r == -EDEADLK ? -EBUSY : r;
689 }
690
691 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
692                         struct ttm_resource_manager *man,
693                         const struct ttm_place *place,
694                         struct ttm_operation_ctx *ctx,
695                         struct ww_acquire_ctx *ticket)
696 {
697         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
698         bool locked = false;
699         unsigned i;
700         int ret;
701
702         spin_lock(&ttm_bo_glob.lru_lock);
703         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
704                 list_for_each_entry(bo, &man->lru[i], lru) {
705                         bool busy;
706
707                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
708                                                             &busy)) {
709                                 if (busy && !busy_bo && ticket !=
710                                     dma_resv_locking_ctx(bo->base.resv))
711                                         busy_bo = bo;
712                                 continue;
713                         }
714
715                         if (place && !bdev->driver->eviction_valuable(bo,
716                                                                       place)) {
717                                 if (locked)
718                                         dma_resv_unlock(bo->base.resv);
719                                 continue;
720                         }
721                         if (!ttm_bo_get_unless_zero(bo)) {
722                                 if (locked)
723                                         dma_resv_unlock(bo->base.resv);
724                                 continue;
725                         }
726                         break;
727                 }
728
729                 /* If the inner loop terminated early, we have our candidate */
730                 if (&bo->lru != &man->lru[i])
731                         break;
732
733                 bo = NULL;
734         }
735
736         if (!bo) {
737                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
738                         busy_bo = NULL;
739                 spin_unlock(&ttm_bo_glob.lru_lock);
740                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
741                 if (busy_bo)
742                         ttm_bo_put(busy_bo);
743                 return ret;
744         }
745
746         if (bo->deleted) {
747                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
748                                           ctx->no_wait_gpu, locked);
749                 ttm_bo_put(bo);
750                 return ret;
751         }
752
753         spin_unlock(&ttm_bo_glob.lru_lock);
754
755         ret = ttm_bo_evict(bo, ctx);
756         if (locked)
757                 ttm_bo_unreserve(bo);
758
759         ttm_bo_put(bo);
760         return ret;
761 }
762
763 /*
764  * Add the last move fence to the BO and reserve a new shared slot.
765  */
766 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
767                                  struct ttm_resource_manager *man,
768                                  struct ttm_resource *mem,
769                                  bool no_wait_gpu)
770 {
771         struct dma_fence *fence;
772         int ret;
773
774         spin_lock(&man->move_lock);
775         fence = dma_fence_get(man->move);
776         spin_unlock(&man->move_lock);
777
778         if (!fence)
779                 return 0;
780
781         if (no_wait_gpu) {
782                 dma_fence_put(fence);
783                 return -EBUSY;
784         }
785
786         dma_resv_add_shared_fence(bo->base.resv, fence);
787
788         ret = dma_resv_reserve_shared(bo->base.resv, 1);
789         if (unlikely(ret)) {
790                 dma_fence_put(fence);
791                 return ret;
792         }
793
794         dma_fence_put(bo->moving);
795         bo->moving = fence;
796         return 0;
797 }
798
799 /*
800  * Repeatedly evict memory from the LRU for @mem_type until we create enough
801  * space, or we've evicted everything and there isn't enough space.
802  */
803 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
804                                   const struct ttm_place *place,
805                                   struct ttm_resource *mem,
806                                   struct ttm_operation_ctx *ctx)
807 {
808         struct ttm_bo_device *bdev = bo->bdev;
809         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
810         struct ww_acquire_ctx *ticket;
811         int ret;
812
813         ticket = dma_resv_locking_ctx(bo->base.resv);
814         do {
815                 ret = ttm_resource_alloc(bo, place, mem);
816                 if (likely(!ret))
817                         break;
818                 if (unlikely(ret != -ENOSPC))
819                         return ret;
820                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
821                                           ticket);
822                 if (unlikely(ret != 0))
823                         return ret;
824         } while (1);
825
826         return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
827 }
828
829 /**
830  * ttm_bo_mem_placement - check if placement is compatible
831  * @bo: BO to find memory for
832  * @place: where to search
833  * @mem: the memory object to fill in
834  *
835  * Check if placement is compatible and fill in mem structure.
836  * Returns -EBUSY if the placement is not available, 0 when the
837  * placement can be used.
838  */
839 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
840                                 const struct ttm_place *place,
841                                 struct ttm_resource *mem)
842 {
843         struct ttm_bo_device *bdev = bo->bdev;
844         struct ttm_resource_manager *man;
845
846         man = ttm_manager_type(bdev, place->mem_type);
847         if (!man || !ttm_resource_manager_used(man))
848                 return -EBUSY;
849
850         mem->mem_type = place->mem_type;
851         mem->placement = place->flags;
852
853         spin_lock(&ttm_bo_glob.lru_lock);
854         ttm_bo_move_to_lru_tail(bo, mem, NULL);
855         spin_unlock(&ttm_bo_glob.lru_lock);
856
857         return 0;
858 }
859
860 /*
861  * Creates space for memory region @mem according to its type.
862  *
863  * This function first searches for free space in compatible memory types in
864  * the priority order defined by the driver.  If free space isn't found, then
865  * ttm_bo_mem_force_space is attempted in priority order to evict and find
866  * space.
867  */
868 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
869                         struct ttm_placement *placement,
870                         struct ttm_resource *mem,
871                         struct ttm_operation_ctx *ctx)
872 {
873         struct ttm_bo_device *bdev = bo->bdev;
874         bool type_found = false;
875         int i, ret;
876
877         ret = dma_resv_reserve_shared(bo->base.resv, 1);
878         if (unlikely(ret))
879                 return ret;
880
881         for (i = 0; i < placement->num_placement; ++i) {
882                 const struct ttm_place *place = &placement->placement[i];
883                 struct ttm_resource_manager *man;
884
885                 ret = ttm_bo_mem_placement(bo, place, mem);
886                 if (ret)
887                         continue;
888
889                 type_found = true;
890                 ret = ttm_resource_alloc(bo, place, mem);
891                 if (ret == -ENOSPC)
892                         continue;
893                 if (unlikely(ret))
894                         goto error;
895
896                 man = ttm_manager_type(bdev, mem->mem_type);
897                 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
898                 if (unlikely(ret)) {
899                         ttm_resource_free(bo, mem);
900                         if (ret == -EBUSY)
901                                 continue;
902
903                         goto error;
904                 }
905                 return 0;
906         }
907
908         for (i = 0; i < placement->num_busy_placement; ++i) {
909                 const struct ttm_place *place = &placement->busy_placement[i];
910
911                 ret = ttm_bo_mem_placement(bo, place, mem);
912                 if (ret)
913                         continue;
914
915                 type_found = true;
916                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
917                 if (likely(!ret))
918                         return 0;
919
920                 if (ret && ret != -EBUSY)
921                         goto error;
922         }
923
924         ret = -ENOMEM;
925         if (!type_found) {
926                 pr_err("No compatible memory type found\n");
927                 ret = -EINVAL;
928         }
929
930 error:
931         if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
932                 ttm_bo_move_to_lru_tail_unlocked(bo);
933
934         return ret;
935 }
936 EXPORT_SYMBOL(ttm_bo_mem_space);
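/*
 * Minimal caller-side sketch: describe the acceptable domains with ttm_place
 * entries, wrap them in a ttm_placement and let ttm_bo_mem_space() pick and
 * allocate a resource. @mem must already carry num_pages/page_alignment as
 * done in ttm_bo_move_buffer() below; the placements here are only examples.
 *
 *	struct ttm_place places[] = {
 *		{ .mem_type = TTM_PL_VRAM },
 *		{ .mem_type = TTM_PL_TT },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(places),
 *		.placement = places,
 *		.num_busy_placement = ARRAY_SIZE(places),
 *		.busy_placement = places,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);
 */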
937
938 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
939                                      struct ttm_resource *mem,
940                                      struct ttm_operation_ctx *ctx,
941                                      struct ttm_place *hop)
942 {
943         struct ttm_placement hop_placement;
944         int ret;
945         struct ttm_resource hop_mem = *mem;
946
947         hop_mem.mm_node = NULL;
948         hop_mem.mem_type = TTM_PL_SYSTEM;
949         hop_mem.placement = 0;
950
951         hop_placement.num_placement = hop_placement.num_busy_placement = 1;
952         hop_placement.placement = hop_placement.busy_placement = hop;
953
954         /* find space in the bounce domain */
955         ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
956         if (ret)
957                 return ret;
958         /* move to the bounce domain */
959         ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
960         if (ret)
961                 return ret;
962         return 0;
963 }
964
965 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
966                               struct ttm_placement *placement,
967                               struct ttm_operation_ctx *ctx)
968 {
969         int ret = 0;
970         struct ttm_place hop;
971         struct ttm_resource mem;
972
973         dma_resv_assert_held(bo->base.resv);
974
975         memset(&hop, 0, sizeof(hop));
976
977         mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
978         mem.page_alignment = bo->mem.page_alignment;
979         mem.bus.offset = 0;
980         mem.bus.addr = NULL;
981         mem.mm_node = NULL;
982
983         /*
984          * Determine where to move the buffer.
985          *
986          * If the driver determines that the move needs
987          * an extra step, it returns -EMULTIHOP and the
988          * buffer is first moved to the temporary hop
989          * placement; the driver is then called again to
990          * make the second hop.
991          */
992 bounce:
993         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
994         if (ret)
995                 return ret;
996         ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
997         if (ret == -EMULTIHOP) {
998                 ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
999                 if (ret)
1000                         return ret;
1001                 /* try and move to final place now. */
1002                 goto bounce;
1003         }
1004         if (ret)
1005                 ttm_resource_free(bo, &mem);
1006         return ret;
1007 }
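/*
 * Sketch of the driver side of the -EMULTIHOP contract used above (purely
 * illustrative, the mydrv_* names are made up): when a direct move needs a
 * bounce, the driver fills in @hop and returns -EMULTIHOP, and
 * ttm_bo_move_buffer() first moves the BO to that placement before retrying
 * the final move.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		if (mydrv_needs_bounce(bo, new_mem)) {
 *			hop->fpfn = 0;
 *			hop->lpfn = 0;
 *			hop->mem_type = TTM_PL_TT;
 *			hop->flags = 0;
 *			return -EMULTIHOP;
 *		}
 *		return mydrv_copy_buffer(bo, ctx, new_mem);
 *	}
 */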
1008
1009 static bool ttm_bo_places_compat(const struct ttm_place *places,
1010                                  unsigned num_placement,
1011                                  struct ttm_resource *mem,
1012                                  uint32_t *new_flags)
1013 {
1014         unsigned i;
1015
1016         for (i = 0; i < num_placement; i++) {
1017                 const struct ttm_place *heap = &places[i];
1018
1019                 if ((mem->start < heap->fpfn ||
1020                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1021                         continue;
1022
1023                 *new_flags = heap->flags;
1024                 if ((mem->mem_type == heap->mem_type) &&
1025                     (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1026                      (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1027                         return true;
1028         }
1029         return false;
1030 }
1031
1032 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1033                        struct ttm_resource *mem,
1034                        uint32_t *new_flags)
1035 {
1036         if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1037                                  mem, new_flags))
1038                 return true;
1039
1040         if ((placement->busy_placement != placement->placement ||
1041              placement->num_busy_placement > placement->num_placement) &&
1042             ttm_bo_places_compat(placement->busy_placement,
1043                                  placement->num_busy_placement,
1044                                  mem, new_flags))
1045                 return true;
1046
1047         return false;
1048 }
1049 EXPORT_SYMBOL(ttm_bo_mem_compat);
1050
1051 int ttm_bo_validate(struct ttm_buffer_object *bo,
1052                     struct ttm_placement *placement,
1053                     struct ttm_operation_ctx *ctx)
1054 {
1055         int ret;
1056         uint32_t new_flags;
1057
1058         dma_resv_assert_held(bo->base.resv);
1059
1060         /*
1061          * Remove the backing store if no placement is given.
1062          */
1063         if (!placement->num_placement && !placement->num_busy_placement) {
1064                 ret = ttm_bo_pipeline_gutting(bo);
1065                 if (ret)
1066                         return ret;
1067
1068                 return ttm_tt_create(bo, false);
1069         }
1070
1071         /*
1072          * Check whether we need to move buffer.
1073          */
1074         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1075                 ret = ttm_bo_move_buffer(bo, placement, ctx);
1076                 if (ret)
1077                         return ret;
1078         }
1079         /*
1080          * We might need to add a TTM.
1081          */
1082         if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1083                 ret = ttm_tt_create(bo, true);
1084                 if (ret)
1085                         return ret;
1086         }
1087         return 0;
1088 }
1089 EXPORT_SYMBOL(ttm_bo_validate);
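/*
 * Typical caller-side sketch (the placement variable is driver-defined and
 * only an example): reserve the BO, then let validate perform any move that
 * is needed to satisfy the requested placement.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &mydrv_vram_placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */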
1090
1091 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1092                          struct ttm_buffer_object *bo,
1093                          size_t size,
1094                          enum ttm_bo_type type,
1095                          struct ttm_placement *placement,
1096                          uint32_t page_alignment,
1097                          struct ttm_operation_ctx *ctx,
1098                          size_t acc_size,
1099                          struct sg_table *sg,
1100                          struct dma_resv *resv,
1101                          void (*destroy) (struct ttm_buffer_object *))
1102 {
1103         struct ttm_mem_global *mem_glob = &ttm_mem_glob;
1104         bool locked;
1105         int ret = 0;
1106
1107         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1108         if (ret) {
1109                 pr_err("Out of kernel memory\n");
1110                 if (destroy)
1111                         (*destroy)(bo);
1112                 else
1113                         kfree(bo);
1114                 return -ENOMEM;
1115         }
1116
1117         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1118
1119         kref_init(&bo->kref);
1120         INIT_LIST_HEAD(&bo->lru);
1121         INIT_LIST_HEAD(&bo->ddestroy);
1122         INIT_LIST_HEAD(&bo->swap);
1123         bo->bdev = bdev;
1124         bo->type = type;
1125         bo->mem.mem_type = TTM_PL_SYSTEM;
1126         bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1127         bo->mem.mm_node = NULL;
1128         bo->mem.page_alignment = page_alignment;
1129         bo->mem.bus.offset = 0;
1130         bo->mem.bus.addr = NULL;
1131         bo->moving = NULL;
1132         bo->mem.placement = 0;
1133         bo->acc_size = acc_size;
1134         bo->pin_count = 0;
1135         bo->sg = sg;
1136         if (resv) {
1137                 bo->base.resv = resv;
1138                 dma_resv_assert_held(bo->base.resv);
1139         } else {
1140                 bo->base.resv = &bo->base._resv;
1141         }
1142         if (!ttm_bo_uses_embedded_gem_object(bo)) {
1143                 /*
1144                  * bo.base is not initialized, so we have to set up the
1145                  * struct elements we want to use regardless.
1146                  */
1147                 bo->base.size = size;
1148                 dma_resv_init(&bo->base._resv);
1149                 drm_vma_node_reset(&bo->base.vma_node);
1150         }
1151         atomic_inc(&ttm_bo_glob.bo_count);
1152
1153         /*
1154          * For ttm_bo_type_device and ttm_bo_type_sg buffers,
1155          * allocate mmap address space from the device.
1156          */
1157         if (bo->type == ttm_bo_type_device ||
1158             bo->type == ttm_bo_type_sg)
1159                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1160                                          bo->mem.num_pages);
1161
1162         /* passed reservation objects should already be locked,
1163          * since otherwise lockdep will be angered in radeon.
1164          */
1165         if (!resv) {
1166                 locked = dma_resv_trylock(bo->base.resv);
1167                 WARN_ON(!locked);
1168         }
1169
1170         if (likely(!ret))
1171                 ret = ttm_bo_validate(bo, placement, ctx);
1172
1173         if (unlikely(ret)) {
1174                 if (!resv)
1175                         ttm_bo_unreserve(bo);
1176
1177                 ttm_bo_put(bo);
1178                 return ret;
1179         }
1180
1181         ttm_bo_move_to_lru_tail_unlocked(bo);
1182
1183         return ret;
1184 }
1185 EXPORT_SYMBOL(ttm_bo_init_reserved);
1186
1187 int ttm_bo_init(struct ttm_bo_device *bdev,
1188                 struct ttm_buffer_object *bo,
1189                 size_t size,
1190                 enum ttm_bo_type type,
1191                 struct ttm_placement *placement,
1192                 uint32_t page_alignment,
1193                 bool interruptible,
1194                 size_t acc_size,
1195                 struct sg_table *sg,
1196                 struct dma_resv *resv,
1197                 void (*destroy) (struct ttm_buffer_object *))
1198 {
1199         struct ttm_operation_ctx ctx = { interruptible, false };
1200         int ret;
1201
1202         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1203                                    page_alignment, &ctx, acc_size,
1204                                    sg, resv, destroy);
1205         if (ret)
1206                 return ret;
1207
1208         if (!resv)
1209                 ttm_bo_unreserve(bo);
1210
1211         return 0;
1212 }
1213 EXPORT_SYMBOL(ttm_bo_init);
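/*
 * Condensed creation sketch (struct mydrv_bo and the mydrv_* names are made
 * up): size the accounting with ttm_bo_dma_acc_size(), then hand the embedded
 * ttm_buffer_object to ttm_bo_init() together with a destroy callback that
 * frees the wrapper.
 *
 *	size_t acc_size = ttm_bo_dma_acc_size(bdev, size,
 *					      sizeof(struct mydrv_bo));
 *	struct mydrv_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *
 *	if (!mbo)
 *		return -ENOMEM;
 *	ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
 *			  &mydrv_placement, 1, true, acc_size,
 *			  NULL, NULL, mydrv_bo_destroy);
 *
 * On failure ttm_bo_init() has already dropped the last reference, which
 * ends up in the destroy callback, so the caller must not free @mbo again.
 */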
1214
1215 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1216                            unsigned long bo_size,
1217                            unsigned struct_size)
1218 {
1219         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1220         size_t size = 0;
1221
1222         size += ttm_round_pot(struct_size);
1223         size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1224         size += ttm_round_pot(sizeof(struct ttm_tt));
1225         return size;
1226 }
1227 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1228
1229 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1230 {
1231         struct ttm_bo_global *glob =
1232                 container_of(kobj, struct ttm_bo_global, kobj);
1233
1234         __free_page(glob->dummy_read_page);
1235 }
1236
1237 static void ttm_bo_global_release(void)
1238 {
1239         struct ttm_bo_global *glob = &ttm_bo_glob;
1240
1241         mutex_lock(&ttm_global_mutex);
1242         if (--ttm_bo_glob_use_count > 0)
1243                 goto out;
1244
1245         kobject_del(&glob->kobj);
1246         kobject_put(&glob->kobj);
1247         ttm_mem_global_release(&ttm_mem_glob);
1248         memset(glob, 0, sizeof(*glob));
1249 out:
1250         mutex_unlock(&ttm_global_mutex);
1251 }
1252
1253 static int ttm_bo_global_init(void)
1254 {
1255         struct ttm_bo_global *glob = &ttm_bo_glob;
1256         int ret = 0;
1257         unsigned i;
1258
1259         mutex_lock(&ttm_global_mutex);
1260         if (++ttm_bo_glob_use_count > 1)
1261                 goto out;
1262
1263         ret = ttm_mem_global_init(&ttm_mem_glob);
1264         if (ret)
1265                 goto out;
1266
1267         spin_lock_init(&glob->lru_lock);
1268         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1269
1270         if (unlikely(glob->dummy_read_page == NULL)) {
1271                 ret = -ENOMEM;
1272                 goto out;
1273         }
1274
1275         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1276                 INIT_LIST_HEAD(&glob->swap_lru[i]);
1277         INIT_LIST_HEAD(&glob->device_list);
1278         atomic_set(&glob->bo_count, 0);
1279
1280         ret = kobject_init_and_add(
1281                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1282         if (unlikely(ret != 0))
1283                 kobject_put(&glob->kobj);
1284 out:
1285         mutex_unlock(&ttm_global_mutex);
1286         return ret;
1287 }
1288
1289 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1290 {
1291         struct ttm_bo_global *glob = &ttm_bo_glob;
1292         int ret = 0;
1293         unsigned i;
1294         struct ttm_resource_manager *man;
1295
1296         man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1297         ttm_resource_manager_set_used(man, false);
1298         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1299
1300         mutex_lock(&ttm_global_mutex);
1301         list_del(&bdev->device_list);
1302         mutex_unlock(&ttm_global_mutex);
1303
1304         cancel_delayed_work_sync(&bdev->wq);
1305
1306         if (ttm_bo_delayed_delete(bdev, true))
1307                 pr_debug("Delayed destroy list was clean\n");
1308
1309         spin_lock(&glob->lru_lock);
1310         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1311                 if (list_empty(&man->lru[i]))
1312                         pr_debug("LRU list %d was clean\n", i);
1313         spin_unlock(&glob->lru_lock);
1314
1315         ttm_pool_fini(&bdev->pool);
1316
1317         if (!ret)
1318                 ttm_bo_global_release();
1319
1320         return ret;
1321 }
1322 EXPORT_SYMBOL(ttm_bo_device_release);
1323
1324 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1325 {
1326         struct ttm_resource_manager *man = &bdev->sysman;
1327
1328         /*
1329          * Initialize the system memory buffer type.
1330          * Other types need to be driver / IOCTL initialized.
1331          */
1332         man->use_tt = true;
1333
1334         ttm_resource_manager_init(man, 0);
1335         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1336         ttm_resource_manager_set_used(man, true);
1337 }
1338
1339 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1340                        struct ttm_bo_driver *driver,
1341                        struct device *dev,
1342                        struct address_space *mapping,
1343                        struct drm_vma_offset_manager *vma_manager,
1344                        bool use_dma_alloc, bool use_dma32)
1345 {
1346         struct ttm_bo_global *glob = &ttm_bo_glob;
1347         int ret;
1348
1349         if (WARN_ON(vma_manager == NULL))
1350                 return -EINVAL;
1351
1352         ret = ttm_bo_global_init();
1353         if (ret)
1354                 return ret;
1355
1356         bdev->driver = driver;
1357
1358         ttm_bo_init_sysman(bdev);
1359         ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
1360
1361         bdev->vma_manager = vma_manager;
1362         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1363         INIT_LIST_HEAD(&bdev->ddestroy);
1364         bdev->dev_mapping = mapping;
1365         mutex_lock(&ttm_global_mutex);
1366         list_add_tail(&bdev->device_list, &glob->device_list);
1367         mutex_unlock(&ttm_global_mutex);
1368
1369         return 0;
1370 }
1371 EXPORT_SYMBOL(ttm_bo_device_init);
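/*
 * Driver bring-up sketch (mydrv and the chosen DMA options are illustrative):
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev, &mydrv_bo_driver,
 *				 drm->dev, drm->anon_inode->i_mapping,
 *				 drm->vma_offset_manager,
 *				 true, false);
 *	...
 *	ttm_bo_device_release(&mydrv->bdev);	// on teardown
 *
 * The last two arguments select whether the page pool allocates coherent DMA
 * memory and whether it is restricted to 32-bit addressable (GFP_DMA32) pages.
 */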
1372
1373 /*
1374  * buffer object vm functions.
1375  */
1376
1377 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1378 {
1379         struct ttm_bo_device *bdev = bo->bdev;
1380
1381         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1382         ttm_mem_io_free(bdev, &bo->mem);
1383 }
1384 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1385
1386 int ttm_bo_wait(struct ttm_buffer_object *bo,
1387                 bool interruptible, bool no_wait)
1388 {
1389         long timeout = 15 * HZ;
1390
1391         if (no_wait) {
1392                 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1393                         return 0;
1394                 else
1395                         return -EBUSY;
1396         }
1397
1398         timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1399                                                       interruptible, timeout);
1400         if (timeout < 0)
1401                 return timeout;
1402
1403         if (timeout == 0)
1404                 return -EBUSY;
1405
1406         dma_resv_add_excl_fence(bo->base.resv, NULL);
1407         return 0;
1408 }
1409 EXPORT_SYMBOL(ttm_bo_wait);
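/*
 * Small usage sketch: wait for all fences (shared and exclusive) before CPU
 * access, with the reservation held as required by the exclusive-fence update
 * at the end of ttm_bo_wait().
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_wait(bo, true, false);
 *	ttm_bo_unreserve(bo);
 */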
1410
1411 /*
1412  * A buffer object shrink method that tries to swap out the first
1413  * swappable buffer object on the ttm_bo_global::swap_lru lists.
1414  */
1415 int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
1416 {
1417         struct ttm_bo_global *glob = &ttm_bo_glob;
1418         struct ttm_buffer_object *bo;
1419         int ret = -EBUSY;
1420         bool locked;
1421         unsigned i;
1422
1423         spin_lock(&glob->lru_lock);
1424         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1425                 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1426                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1427                                                             NULL))
1428                                 continue;
1429
1430                         if (!ttm_bo_get_unless_zero(bo)) {
1431                                 if (locked)
1432                                         dma_resv_unlock(bo->base.resv);
1433                                 continue;
1434                         }
1435
1436                         ret = 0;
1437                         break;
1438                 }
1439                 if (!ret)
1440                         break;
1441         }
1442
1443         if (ret) {
1444                 spin_unlock(&glob->lru_lock);
1445                 return ret;
1446         }
1447
1448         if (bo->deleted) {
1449                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1450                 ttm_bo_put(bo);
1451                 return ret;
1452         }
1453
1454         ttm_bo_del_from_lru(bo);
1455         spin_unlock(&glob->lru_lock);
1456
1457         /**
1458          * Move to system cached
1459          */
1460
1461         if (bo->mem.mem_type != TTM_PL_SYSTEM) {
1462                 struct ttm_operation_ctx ctx = { false, false };
1463                 struct ttm_resource evict_mem;
1464                 struct ttm_place hop;
1465
1466                 memset(&hop, 0, sizeof(hop));
1467
1468                 evict_mem = bo->mem;
1469                 evict_mem.mm_node = NULL;
1470                 evict_mem.placement = 0;
1471                 evict_mem.mem_type = TTM_PL_SYSTEM;
1472
1473                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
1474                 if (unlikely(ret != 0)) {
1475                         WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1476                         goto out;
1477                 }
1478         }
1479
1480         /**
1481          * Make sure BO is idle.
1482          */
1483
1484         ret = ttm_bo_wait(bo, false, false);
1485         if (unlikely(ret != 0))
1486                 goto out;
1487
1488         ttm_bo_unmap_virtual(bo);
1489
1490         /**
1491          * Swap out. Buffer will be swapped in again as soon as
1492          * anyone tries to access a ttm page.
1493          */
1494
1495         if (bo->bdev->driver->swap_notify)
1496                 bo->bdev->driver->swap_notify(bo);
1497
1498         ret = ttm_tt_swapout(bo->bdev, bo->ttm);
1499 out:
1500
1501         /*
1502          * Unreserve without putting on LRU to avoid swapping out an
1503          * already swapped buffer.
1504          */
1506         if (locked)
1507                 dma_resv_unlock(bo->base.resv);
1508         ttm_bo_put(bo);
1509         return ret;
1510 }
1511 EXPORT_SYMBOL(ttm_bo_swapout);
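/*
 * Shrinker-style caller sketch (the policy helper is hypothetical): swap out
 * one BO at a time until enough memory has been freed or nothing swappable
 * is left, in which case ttm_bo_swapout() returns -EBUSY.
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	while (mydrv_should_free_more_pages()) {
 *		if (ttm_bo_swapout(&ctx))
 *			break;
 *	}
 */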
1512
1513 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1514 {
1515         if (bo->ttm == NULL)
1516                 return;
1517
1518         ttm_tt_destroy(bo->bdev, bo->ttm);
1519         bo->ttm = NULL;
1520 }
1521