/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global BO state
 */
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        struct drm_printer p = drm_debug_printer(TTM_PFX);
        struct ttm_resource_manager *man;
        int i, mem_type;

        drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
                   bo, bo->mem.num_pages, bo->base.size >> 10,
                   bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
                           i, placement->placement[i].flags, mem_type);
                man = ttm_manager_type(bo->bdev, mem_type);
                ttm_resource_manager_debug(man, &p);
        }
}

static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        list_del_init(&bo->swap);
        list_del_init(&bo->lru);

        if (bdev->driver->del_from_lru_notify)
                bdev->driver->del_from_lru_notify(bo);
}

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
                                     struct ttm_buffer_object *bo)
{
        if (!pos->first)
                pos->first = bo;
        pos->last = bo;
}

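/*
 * ttm_bo_move_to_lru_tail - move a BO to the end of its LRU lists
 *
 * Moves @bo to the tail of the LRU list of the resource manager backing
 * @mem and, for TT-backed buffers that are neither SG nor already swapped
 * out, to the tail of the global swap LRU as well. Pinned buffers are
 * removed from the LRUs instead. When @bulk is given, the position is
 * recorded so a later ttm_bo_bulk_move_lru_tail() can batch the move.
 * The caller must hold bo->base.resv.
 */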
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                             struct ttm_resource *mem,
                             struct ttm_lru_bulk_move *bulk)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;

        dma_resv_assert_held(bo->base.resv);

        if (bo->pin_count) {
                ttm_bo_del_from_lru(bo);
                return;
        }

        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);
        if (man->use_tt && bo->ttm &&
            !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
                                     TTM_PAGE_FLAG_SWAPPED))) {
                struct list_head *swap;

                swap = &ttm_bo_glob.swap_lru[bo->priority];
                list_move_tail(&bo->swap, swap);
        }

        if (bdev->driver->del_from_lru_notify)
                bdev->driver->del_from_lru_notify(bo);

        if (bulk && !bo->pin_count) {
                switch (bo->mem.mem_type) {
                case TTM_PL_TT:
                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
                        break;

                case TTM_PL_VRAM:
                        ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
                        break;
                }
                if (bo->ttm && !(bo->ttm->page_flags &
                                 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
                        ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
        }
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

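/*
 * ttm_bo_bulk_move_lru_tail - bulk-move recorded BOs to their LRU tails
 *
 * Moves every range of buffers recorded in @bulk (TT, VRAM and swap LRUs,
 * per priority) to the tail of the corresponding list in one go. All
 * buffers in @bulk must still be reserved by the caller.
 */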
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
        unsigned i;

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
                struct ttm_resource_manager *man;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                    &pos->last->lru);
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
                struct list_head *lru;

                if (!pos->first)
                        continue;

                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);

                lru = &ttm_bo_glob.swap_lru[i];
                list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
        }
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
        struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
        int ret;

        ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (new_man->use_tt) {
                /* Zero init the new TTM structure if the old location should
                 * have used one as well.
                 */
                ret = ttm_tt_create(bo, old_man->use_tt);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
                }
        }

        ret = bdev->driver->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
                        return ret;
                goto out_err;
        }

        ctx->bytes_moved += bo->base.size;
        return 0;

out_err:
        new_man = ttm_manager_type(bdev, bo->mem.mem_type);
        if (!new_man->use_tt)
                ttm_bo_tt_destroy(bo);

        return ret;
}

/*
 * Must be called with bo::reserved held.
 * Releases the GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * The bo::reserved lock is released by the caller afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->driver->delete_mem_notify)
                bo->bdev->driver->delete_mem_notify(bo);

        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->mem);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->base.resv == &bo->base._resv)
                return 0;

        BUG_ON(!dma_resv_trylock(&bo->base._resv));

        r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        dma_resv_unlock(&bo->base._resv);
        if (r)
                return r;

        if (bo->type != ttm_bo_type_sg) {
                /* This works because the BO is about to be destroyed and nobody
                 * references it anymore. The only tricky case is the trylock on
                 * the resv object while holding the lru_lock.
                 */
                spin_lock(&ttm_bo_glob.lru_lock);
                bo->base.resv = &bo->base._resv;
                spin_unlock(&ttm_bo_glob.lru_lock);
        }

        return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct dma_resv *resv = &bo->base._resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int i;

        rcu_read_lock();
        fobj = rcu_dereference(resv->fence);
        fence = rcu_dereference(resv->fence_excl);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);

        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference(fobj->shared[i]);

                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
        rcu_read_unlock();
}

/**
 * ttm_bo_cleanup_refs - If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo:                    The buffer object to clean-up
 * @interruptible:         Any sleeps should occur interruptibly.
 * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv:           Unlock the reservation lock as well.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct dma_resv *resv = &bo->base._resv;
        int ret;

        if (dma_resv_test_signaled_rcu(resv, true))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&ttm_bo_glob.lru_lock);

                lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
                                                 30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&ttm_bo_glob.lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
                         * and is probably busy in ttm_bo_cleanup_memtype_use.
                         *
                         * Even if it's not the case, because we finished waiting, any
                         * delayed destruction would succeed, so just return success
                         * here.
                         */
                        spin_unlock(&ttm_bo_glob.lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&ttm_bo_glob.lru_lock);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        spin_unlock(&ttm_bo_glob.lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);

        ttm_bo_put(bo);

        return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;
        struct list_head removed;
        bool empty;

        INIT_LIST_HEAD(&removed);

        spin_lock(&glob->lru_lock);
        while (!list_empty(&bdev->ddestroy)) {
                struct ttm_buffer_object *bo;

                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
                                      ddestroy);
                list_move_tail(&bo->ddestroy, &removed);
                if (!ttm_bo_get_unless_zero(bo))
                        continue;

                if (remove_all || bo->base.resv != &bo->base._resv) {
                        spin_unlock(&glob->lru_lock);
                        dma_resv_lock(bo->base.resv, NULL);

                        spin_lock(&glob->lru_lock);
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);

                } else if (dma_resv_trylock(bo->base.resv)) {
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
                } else {
                        spin_unlock(&glob->lru_lock);
                }

                ttm_bo_put(bo);
                spin_lock(&glob->lru_lock);
        }
        list_splice_tail(&removed, &bdev->ddestroy);
        empty = list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (!ttm_bo_delayed_delete(bdev, false))
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

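/*
 * ttm_bo_release - final kref release handler
 *
 * Individualizes the reservation object, notifies the driver and drops the
 * BO's mmap offset. If the buffer is still busy it is resurrected onto the
 * delayed-destroy list; otherwise it is destroyed immediately and its
 * memory accounting is released.
 */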
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;
        int ret;

        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort, if we fail to allocate memory for the
                         * fences, block for the BO to become idle
                         */
                        dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                                  30 * HZ);
                }

                if (bo->bdev->driver->release_notify)
                        bo->bdev->driver->release_notify(bo);

                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, &bo->mem);
        }

        if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                /* The BO is not idle, resurrect it for delayed destroy */
                ttm_bo_flush_all_fences(bo);
                bo->deleted = true;

                spin_lock(&ttm_bo_glob.lru_lock);

                /*
                 * Make pinned bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 */
                if (WARN_ON(bo->pin_count)) {
                        bo->pin_count = 0;
                        ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
                }

                kref_init(&bo->kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&ttm_bo_glob.lru_lock);

                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                return;
        }

        spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_del_from_lru(bo);
        list_del(&bo->ddestroy);
        spin_unlock(&ttm_bo_glob.lru_lock);

        ttm_bo_cleanup_memtype_use(bo);
        dma_resv_unlock(bo->base.resv);

        atomic_dec(&ttm_bo_glob.bo_count);
        dma_fence_put(bo->moving);
        if (!ttm_bo_uses_embedded_gem_object(bo))
                dma_resv_fini(&bo->base._resv);
        bo->destroy(bo);
        ttm_mem_global_free(&ttm_mem_glob, acc_size);
}

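/*
 * ttm_bo_put - drop a buffer object reference; the final reference
 * destroys the BO or queues it for delayed destruction.
 */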
void ttm_bo_put(struct ttm_buffer_object *bo)
{
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

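/*
 * ttm_bo_evict - evict @bo from its current placement into one of the
 * placements returned by the driver's evict_flags callback. When the
 * driver provides no placement at all, the backing store is dropped
 * instead.
 */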
static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;

        memset(&hop, 0, sizeof(hop));

        dma_resv_assert_held(bo->base.resv);

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);

        if (!placement.num_placement && !placement.num_busy_placement) {
                ttm_bo_wait(bo, false, false);

                ttm_bo_cleanup_memtype_use(bo);
                return ttm_tt_create(bo, false);
        }

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.offset = 0;
        evict_mem.bus.addr = NULL;

        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
        if (unlikely(ret)) {
                WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_resource_free(bo, &evict_mem);
        }
out:
        return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
            (place->lpfn && place->lpfn <= bo->mem.start))
                return false;

        return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including the following cases:
 *
 * a. if it shares the same reservation object with ctx->resv, the
 * reservation object is assumed to be locked already, so don't lock it
 * again; return true directly when the operation allows reserved
 * eviction (ctx->allow_res_evict);
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
        bool ret = false;

        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
                        ret = true;
                *locked = false;
                if (busy)
                        *busy = false;
        } else {
                ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
        }

        return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
                                   struct ttm_operation_ctx *ctx,
                                   struct ww_acquire_ctx *ticket)
{
        int r;

        if (!busy_bo || !ticket)
                return -EBUSY;

        if (ctx->interruptible)
                r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                ticket);
        else
                r = dma_resv_lock(busy_bo->base.resv, ticket);

        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * least tried one more time, but that would mean a much larger rework
         * of TTM.
         */
        if (!r)
                dma_resv_unlock(busy_bo->base.resv);

        return r == -EDEADLK ? -EBUSY : r;
}

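/*
 * ttm_mem_evict_first - evict the first evictable BO on @man's LRU lists
 *
 * Walks the LRU lists in priority order, skipping buffers that cannot be
 * locked or that the driver does not consider worth evicting for @place,
 * and evicts the first suitable one. If only busy buffers are found, one
 * of them is waited upon via ttm_mem_evict_wait_busy() before returning,
 * so that the caller can retry the allocation.
 */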
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
                        struct ttm_operation_ctx *ctx,
                        struct ww_acquire_ctx *ticket)
{
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
        bool locked = false;
        unsigned i;
        int ret;

        spin_lock(&ttm_bo_glob.lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &man->lru[i], lru) {
                        bool busy;

                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
                                                            &busy)) {
                                if (busy && !busy_bo && ticket !=
                                    dma_resv_locking_ctx(bo->base.resv))
                                        busy_bo = bo;
                                continue;
                        }

                        if (place && !bdev->driver->eviction_valuable(bo,
                                                                      place)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        if (!ttm_bo_get_unless_zero(bo)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        break;
                }

                /* If the inner loop terminated early, we have our candidate */
                if (&bo->lru != &man->lru[i])
                        break;

                bo = NULL;
        }

        if (!bo) {
                if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
                        busy_bo = NULL;
                spin_unlock(&ttm_bo_glob.lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        ttm_bo_put(busy_bo);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                ttm_bo_put(bo);
                return ret;
        }

        spin_unlock(&ttm_bo_glob.lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked)
                ttm_bo_unreserve(bo);

        ttm_bo_put(bo);
        return ret;
}

/*
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
                                 struct ttm_resource *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (!fence)
                return 0;

        if (no_wait_gpu) {
                dma_fence_put(fence);
                return -EBUSY;
        }

        dma_resv_add_shared_fence(bo->base.resv, fence);

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret)) {
                dma_fence_put(fence);
                return ret;
        }

        dma_fence_put(bo->moving);
        bo->moving = fence;
        return 0;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource *mem,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        struct ww_acquire_ctx *ticket;
        int ret;

        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
                if (likely(!ret))
                        break;
                if (unlikely(ret != -ENOSPC))
                        return ret;
                ret = ttm_mem_evict_first(bdev, man, place, ctx,
                                          ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);

        return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}

/**
 * ttm_bo_mem_placement - check if placement is compatible
 * @bo: BO to find memory for
 * @place: where to search
 * @mem: the memory object to fill in
 *
 * Check if placement is compatible and fill in mem structure.
 * Returns 0 when the placement can be used, or -EBUSY when it is not
 * available.
 */
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
                                const struct ttm_place *place,
                                struct ttm_resource *mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, place->mem_type);
        if (!man || !ttm_resource_manager_used(man))
                return -EBUSY;

        mem->mem_type = place->mem_type;
        mem->placement = place->flags;

        spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_move_to_lru_tail(bo, mem, NULL);
        spin_unlock(&ttm_bo_glob.lru_lock);

        return 0;
}

/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_resource *mem,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool type_found = false;
        int i, ret;

        ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;

                ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;

                type_found = true;
                ret = ttm_resource_alloc(bo, place, mem);
                if (ret == -ENOSPC)
                        continue;
                if (unlikely(ret))
                        goto error;

                man = ttm_manager_type(bdev, mem->mem_type);
                ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
                                continue;

                        goto error;
                }
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];

                ret = ttm_bo_mem_placement(bo, place, mem);
                if (ret)
                        continue;

                type_found = true;
                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
                if (likely(!ret))
                        return 0;

                if (ret && ret != -EBUSY)
                        goto error;
        }

        ret = -ENOMEM;
        if (!type_found) {
                pr_err("No compatible memory type found\n");
                ret = -EINVAL;
        }

error:
        if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
                ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

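/*
 * ttm_bo_bounce_temp_buffer - handle the first half of a multihop move by
 * placing the BO in the temporary placement described by @hop (the bounce
 * domain), so the driver can then be asked to perform the second hop.
 */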
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource *mem,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_place *hop)
{
        struct ttm_placement hop_placement;
        int ret;
        struct ttm_resource hop_mem = *mem;

        hop_mem.mm_node = NULL;
        hop_mem.mem_type = TTM_PL_SYSTEM;
        hop_mem.placement = 0;

        hop_placement.num_placement = hop_placement.num_busy_placement = 1;
        hop_placement.placement = hop_placement.busy_placement = hop;

        /* find space in the bounce domain */
        ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
        if (ret)
                return ret;
        /* move to the bounce domain */
        ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
        if (ret)
                return ret;
        return 0;
}

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        int ret = 0;
        struct ttm_place hop;
        struct ttm_resource mem;

        dma_resv_assert_held(bo->base.resv);

        memset(&hop, 0, sizeof(hop));

        mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.offset = 0;
        mem.bus.addr = NULL;
        mem.mm_node = NULL;

        /*
         * Determine where to move the buffer.
         *
         * If the driver determines that the move needs an extra step, it
         * returns -EMULTIHOP; the buffer is then moved to the temporary
         * stop and the driver is called again to make the second hop.
         */
bounce:
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        return ret;
                /* try and move to final place now. */
                goto bounce;
        }
        if (ret)
                ttm_resource_free(bo, &mem);
        return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
                                 unsigned num_placement,
                                 struct ttm_resource *mem,
                                 uint32_t *new_flags)
{
        unsigned i;

        for (i = 0; i < num_placement; i++) {
                const struct ttm_place *heap = &places[i];

                if ((mem->start < heap->fpfn ||
                     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
                        continue;

                *new_flags = heap->flags;
                if ((mem->mem_type == heap->mem_type) &&
                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
                        return true;
        }
        return false;
}

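/*
 * ttm_bo_mem_compat - check whether @mem already satisfies @placement,
 * looking at both the normal and the busy placement lists. On success
 * the flags of the matching placement are returned in @new_flags.
 */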
bool ttm_bo_mem_compat(struct ttm_placement *placement,
                       struct ttm_resource *mem,
                       uint32_t *new_flags)
{
        if (ttm_bo_places_compat(placement->placement, placement->num_placement,
                                 mem, new_flags))
                return true;

        if ((placement->busy_placement != placement->placement ||
             placement->num_busy_placement > placement->num_placement) &&
            ttm_bo_places_compat(placement->busy_placement,
                                 placement->num_busy_placement,
                                 mem, new_flags))
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

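/*
 * ttm_bo_validate - validate @bo against @placement
 *
 * Moves the buffer when its current resource is not compatible with the
 * requested placement, drops the backing store entirely when no placement
 * is given, and makes sure a TTM is allocated for system-domain buffers.
 * The caller must hold bo->base.resv.
 */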
int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;
        uint32_t new_flags;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Remove the backing store if no placement is given.
         */
        if (!placement->num_placement && !placement->num_busy_placement) {
                ret = ttm_bo_pipeline_gutting(bo);
                if (ret)
                        return ret;

                return ttm_tt_create(bo, false);
        }

        /*
         * Check whether we need to move buffer.
         */
        if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
                ret = ttm_bo_move_buffer(bo, placement, ctx);
                if (ret)
                        return ret;
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

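/*
 * ttm_bo_init_reserved - initialize a buffer object and validate it into
 * @placement. On success the BO is returned reserved (when no external
 * @resv is supplied it is trylocked here); on failure the reference is
 * dropped and the object is destroyed.
 */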
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         struct ttm_buffer_object *bo,
                         size_t size,
                         enum ttm_bo_type type,
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
                         size_t acc_size,
                         struct sg_table *sg,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        bool locked;
        int ret = 0;

        ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
        if (ret) {
                pr_err("Out of kernel memory\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -ENOMEM;
        }

        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

        kref_init(&bo->kref);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->type = type;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.offset = 0;
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
        bo->mem.placement = 0;
        bo->acc_size = acc_size;
        bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
                dma_resv_assert_held(bo->base.resv);
        } else {
                bo->base.resv = &bo->base._resv;
        }
        if (!ttm_bo_uses_embedded_gem_object(bo)) {
                /*
                 * bo.base is not initialized, so we have to setup the
                 * struct elements we want to use regardless.
                 */
                bo->base.size = size;
                dma_resv_init(&bo->base._resv);
                drm_vma_node_reset(&bo->base.vma_node);
        }
        atomic_inc(&ttm_bo_glob.bo_count);

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
                                         bo->mem.num_pages);

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
                locked = dma_resv_trylock(bo->base.resv);
                WARN_ON(!locked);
        }

        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, ctx);

        if (unlikely(ret)) {
                if (!resv)
                        ttm_bo_unreserve(bo);

                ttm_bo_put(bo);
                return ret;
        }

        ttm_bo_move_to_lru_tail_unlocked(bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

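/*
 * ttm_bo_init - same as ttm_bo_init_reserved(), but the BO is unreserved
 * again on success when no external reservation object was provided.
 */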
int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                size_t size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                size_t acc_size,
                struct sg_table *sg,
                struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
                                   page_alignment, &ctx, acc_size,
                                   sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

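/*
 * ttm_bo_dma_acc_size - compute the accounted kernel memory size of a
 * DMA-backed BO: the BO structure itself plus the per-page pointer and
 * DMA address arrays and the ttm_tt structure.
 */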
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
                           unsigned long bo_size,
                           unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
        size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

static void ttm_bo_global_release(void)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;

        mutex_lock(&ttm_global_mutex);
        if (--ttm_bo_glob_use_count > 0)
                goto out;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
        ttm_mem_global_release(&ttm_mem_glob);
        __free_page(glob->dummy_read_page);
        memset(glob, 0, sizeof(*glob));
out:
        mutex_unlock(&ttm_global_mutex);
}

static int ttm_bo_global_init(void)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;
        int ret = 0;
        unsigned i;

        mutex_lock(&ttm_global_mutex);
        if (++ttm_bo_glob_use_count > 1)
                goto out;

        ret = ttm_mem_global_init(&ttm_mem_glob);
        if (ret)
                goto out;

        spin_lock_init(&glob->lru_lock);
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                INIT_LIST_HEAD(&glob->swap_lru[i]);
        INIT_LIST_HEAD(&glob->device_list);
        atomic_set(&glob->bo_count, 0);

        debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
                                &glob->bo_count);
out:
        mutex_unlock(&ttm_global_mutex);
        return ret;
}

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;
        int ret = 0;
        unsigned i;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
        ttm_resource_manager_set_used(man, false);
        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

        mutex_lock(&ttm_global_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&ttm_global_mutex);

        cancel_delayed_work_sync(&bdev->wq);

        if (ttm_bo_delayed_delete(bdev, true))
                pr_debug("Delayed destroy list was clean\n");

        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                if (list_empty(&man->lru[i]))
                        pr_debug("Swap list %d was clean\n", i);
        spin_unlock(&glob->lru_lock);

        ttm_pool_fini(&bdev->pool);

        if (!ret)
                ttm_bo_global_release();

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
{
        struct ttm_resource_manager *man = &bdev->sysman;

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        man->use_tt = true;

        ttm_resource_manager_init(man, 0);
        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
        ttm_resource_manager_set_used(man, true);
}

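/*
 * ttm_bo_device_init - initialize a TTM buffer object device: sets up the
 * global state, the system resource manager, the page pool and the delayed
 * destroy work, and links the device into the global device list.
 */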
int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
                       struct device *dev,
                       struct address_space *mapping,
                       struct drm_vma_offset_manager *vma_manager,
                       bool use_dma_alloc, bool use_dma32)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;
        int ret;

        if (WARN_ON(vma_manager == NULL))
                return -EINVAL;

        ret = ttm_bo_global_init();
        if (ret)
                return ret;

        bdev->driver = driver;

        ttm_bo_init_sysman(bdev);
        ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

        bdev->vma_manager = vma_manager;
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
        mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&ttm_global_mutex);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
        ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

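/*
 * ttm_bo_wait - wait for all fences on @bo to signal, or return -EBUSY
 * right away when @no_wait is set. Waits at most 15 seconds.
 */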
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
{
        long timeout = 15 * HZ;

        if (no_wait) {
                if (dma_resv_test_signaled_rcu(bo->base.resv, true))
                        return 0;
                else
                        return -EBUSY;
        }

        timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
                                            interruptible, timeout);
        if (timeout < 0)
                return timeout;

        if (timeout == 0)
                return -EBUSY;

        dma_resv_add_excl_fence(bo->base.resv, NULL);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
{
        struct ttm_bo_global *glob = &ttm_bo_glob;
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        bool locked;
        unsigned i;

        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
                                                            NULL))
                                continue;

                        if (!ttm_bo_get_unless_zero(bo)) {
                                if (locked)
                                        dma_resv_unlock(bo->base.resv);
                                continue;
                        }

                        ret = 0;
                        break;
                }
                if (!ret)
                        break;
        }

        if (ret) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        /*
         * Move to system cached
         */

        if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                struct ttm_operation_ctx ctx = { false, false };
                struct ttm_resource evict_mem;
                struct ttm_place hop;

                memset(&hop, 0, sizeof(hop));

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = 0;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
                        goto out;
                }
        }

        /*
         * Make sure BO is idle.
         */

        ret = ttm_bo_wait(bo, false, false);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);

        ret = ttm_tt_swapout(bo->bdev, bo->ttm);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
        if (bo->ttm == NULL)
                return;

        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
}