linux-2.6-microblaze.git: drivers/gpu/drm/ttm/ttm_bo.c (commit c702ec5445f7bf768aa8906677b86f90fb7f265f)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/jiffies.h>
37 #include <linux/slab.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/file.h>
41 #include <linux/module.h>
42 #include <linux/atomic.h>
43 #include <linux/dma-resv.h>
44
45 #include "ttm_module.h"
46
47 static void ttm_bo_global_kobj_release(struct kobject *kobj);
48
49 /*
50  * ttm_global_mutex - protecting the global BO state
51  */
52 DEFINE_MUTEX(ttm_global_mutex);
53 unsigned ttm_bo_glob_use_count;
54 struct ttm_bo_global ttm_bo_glob;
55 EXPORT_SYMBOL(ttm_bo_glob);
56
57 static struct attribute ttm_bo_count = {
58         .name = "bo_count",
59         .mode = S_IRUGO
60 };
61
62 /* default destructor */
63 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
64 {
65         kfree(bo);
66 }
67
68 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
69                                         struct ttm_placement *placement)
70 {
71         struct drm_printer p = drm_debug_printer(TTM_PFX);
72         struct ttm_resource_manager *man;
73         int i, mem_type;
74
75         drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
76                    bo, bo->mem.num_pages, bo->base.size >> 10,
77                    bo->base.size >> 20);
78         for (i = 0; i < placement->num_placement; i++) {
79                 mem_type = placement->placement[i].mem_type;
80                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
81                            i, placement->placement[i].flags, mem_type);
82                 man = ttm_manager_type(bo->bdev, mem_type);
83                 ttm_resource_manager_debug(man, &p);
84         }
85 }
86
87 static ssize_t ttm_bo_global_show(struct kobject *kobj,
88                                   struct attribute *attr,
89                                   char *buffer)
90 {
91         struct ttm_bo_global *glob =
92                 container_of(kobj, struct ttm_bo_global, kobj);
93
94         return snprintf(buffer, PAGE_SIZE, "%d\n",
95                                 atomic_read(&glob->bo_count));
96 }
97
98 static struct attribute *ttm_bo_global_attrs[] = {
99         &ttm_bo_count,
100         NULL
101 };
102
103 static const struct sysfs_ops ttm_bo_global_ops = {
104         .show = &ttm_bo_global_show
105 };
106
107 static struct kobj_type ttm_bo_glob_kobj_type  = {
108         .release = &ttm_bo_global_kobj_release,
109         .sysfs_ops = &ttm_bo_global_ops,
110         .default_attrs = ttm_bo_global_attrs
111 };
112
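    /* Add @bo to the LRU list of the resource manager backing @mem; when the
     * manager uses TT and the BO's ttm is neither an SG ttm nor already
     * swapped out, also add it to the global swap LRU. No-op if the BO is
     * pinned or already on an LRU list.
     */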
113 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
114                                   struct ttm_resource *mem)
115 {
116         struct ttm_bo_device *bdev = bo->bdev;
117         struct ttm_resource_manager *man;
118
119         if (!list_empty(&bo->lru) || bo->pin_count)
120                 return;
121
122         man = ttm_manager_type(bdev, mem->mem_type);
123         list_add_tail(&bo->lru, &man->lru[bo->priority]);
124
125         if (man->use_tt && bo->ttm &&
126             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
127                                      TTM_PAGE_FLAG_SWAPPED))) {
128                 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
129         }
130 }
131
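    /* Remove @bo from the LRU and swap lists and, if it was on either one,
     * give the driver a chance to react via del_from_lru_notify.
     */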
132 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
133 {
134         struct ttm_bo_device *bdev = bo->bdev;
135         bool notify = false;
136
137         if (!list_empty(&bo->swap)) {
138                 list_del_init(&bo->swap);
139                 notify = true;
140         }
141         if (!list_empty(&bo->lru)) {
142                 list_del_init(&bo->lru);
143                 notify = true;
144         }
145
146         if (notify && bdev->driver->del_from_lru_notify)
147                 bdev->driver->del_from_lru_notify(bo);
148 }
149
150 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
151                                      struct ttm_buffer_object *bo)
152 {
153         if (!pos->first)
154                 pos->first = bo;
155         pos->last = bo;
156 }
157
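    /**
     * ttm_bo_move_to_lru_tail - move a reserved BO to the end of its LRU lists
     * @bo: the buffer object, with its reservation held
     * @bulk: optional bulk-move tracker; when given and the BO isn't pinned,
     * the BO is also recorded as the first/last entry of the matching TT,
     * VRAM and swap range so a later ttm_bo_bulk_move_lru_tail() can move
     * the whole range at once.
     */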
158 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
159                              struct ttm_lru_bulk_move *bulk)
160 {
161         dma_resv_assert_held(bo->base.resv);
162
163         ttm_bo_del_from_lru(bo);
164         ttm_bo_add_mem_to_lru(bo, &bo->mem);
165
166         if (bulk && !bo->pin_count) {
167                 switch (bo->mem.mem_type) {
168                 case TTM_PL_TT:
169                         ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
170                         break;
171
172                 case TTM_PL_VRAM:
173                         ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
174                         break;
175                 }
176                 if (bo->ttm && !(bo->ttm->page_flags &
177                                  (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
178                         ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
179         }
180 }
181 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
182
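    /**
     * ttm_bo_bulk_move_lru_tail - bulk-move recorded ranges to their LRU tails
     * @bulk: positions recorded by previous ttm_bo_move_to_lru_tail() calls
     *
     * For every priority level, move the recorded TT, VRAM and swap ranges
     * to the tail of their respective LRU lists with a single splice each.
     * All BOs in a range must still be reserved by the caller.
     */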
183 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
184 {
185         unsigned i;
186
187         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
188                 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
189                 struct ttm_resource_manager *man;
190
191                 if (!pos->first)
192                         continue;
193
194                 dma_resv_assert_held(pos->first->base.resv);
195                 dma_resv_assert_held(pos->last->base.resv);
196
197                 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
198                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
199                                     &pos->last->lru);
200         }
201
202         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
203                 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
204                 struct ttm_resource_manager *man;
205
206                 if (!pos->first)
207                         continue;
208
209                 dma_resv_assert_held(pos->first->base.resv);
210                 dma_resv_assert_held(pos->last->base.resv);
211
212                 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
213                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
214                                     &pos->last->lru);
215         }
216
217         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
218                 struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
219                 struct list_head *lru;
220
221                 if (!pos->first)
222                         continue;
223
224                 dma_resv_assert_held(pos->first->base.resv);
225                 dma_resv_assert_held(pos->last->base.resv);
226
227                 lru = &ttm_bo_glob.swap_lru[i];
228                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
229         }
230 }
231 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
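    /* A minimal sketch (not part of this file) of how a driver could batch
     * its LRU updates with the two helpers above; the iteration over the
     * driver's own BO bookkeeping is left as pseudocode:
     *
     *	struct ttm_lru_bulk_move bulk;
     *
     *	memset(&bulk, 0, sizeof(bulk));
     *	spin_lock(&ttm_bo_glob.lru_lock);
     *	for each BO tracked by the driver (all of them reserved):
     *		ttm_bo_move_to_lru_tail(bo, &bulk);
     *	ttm_bo_bulk_move_lru_tail(&bulk);
     *	spin_unlock(&ttm_bo_glob.lru_lock);
     */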
232
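    /* Move @bo into the resource described by @mem: create and populate a
     * ttm when the new manager needs one, then let the driver's move()
     * callback do the actual transfer. -EMULTIHOP is passed straight back
     * so the caller can bounce through @hop; on other errors the freshly
     * created ttm is torn down again when the current placement doesn't
     * use TT.
     */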
233 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
234                                   struct ttm_resource *mem, bool evict,
235                                   struct ttm_operation_ctx *ctx,
236                                   struct ttm_place *hop)
237 {
238         struct ttm_bo_device *bdev = bo->bdev;
239         struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
240         struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
241         int ret;
242
243         ttm_bo_unmap_virtual(bo);
244
245         /*
246          * Create and bind a ttm if required.
247          */
248
249         if (new_man->use_tt) {
250                 /* Zero init the new TTM structure if the old location should
251                  * have used one as well.
252                  */
253                 ret = ttm_tt_create(bo, old_man->use_tt);
254                 if (ret)
255                         goto out_err;
256
257                 if (mem->mem_type != TTM_PL_SYSTEM) {
258                         ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
259                         if (ret)
260                                 goto out_err;
261                 }
262         }
263
264         ret = bdev->driver->move(bo, evict, ctx, mem, hop);
265         if (ret) {
266                 if (ret == -EMULTIHOP)
267                         return ret;
268                 goto out_err;
269         }
270
271         ctx->bytes_moved += bo->base.size;
272         return 0;
273
274 out_err:
275         new_man = ttm_manager_type(bdev, bo->mem.mem_type);
276         if (!new_man->use_tt)
277                 ttm_bo_tt_destroy(bo);
278
279         return ret;
280 }
281
282 /*
283  * Must be called with bo::resv held.
284  * Will release GPU memory type usage on destruction.
285  * This is the place to put in driver specific hooks to release
286  * driver private resources.
287  * Note: the caller is responsible for releasing the bo::resv lock.
288  */
289
290 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
291 {
292         if (bo->bdev->driver->delete_mem_notify)
293                 bo->bdev->driver->delete_mem_notify(bo);
294
295         ttm_bo_tt_destroy(bo);
296         ttm_resource_free(bo, &bo->mem);
297 }
298
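    /* Give the BO its own reservation object: copy the fences of the current
     * (possibly shared) resv into the embedded bo->base._resv and, except
     * for SG BOs, switch bo->base.resv over to it.
     */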
299 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
300 {
301         int r;
302
303         if (bo->base.resv == &bo->base._resv)
304                 return 0;
305
306         BUG_ON(!dma_resv_trylock(&bo->base._resv));
307
308         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
309         dma_resv_unlock(&bo->base._resv);
310         if (r)
311                 return r;
312
313         if (bo->type != ttm_bo_type_sg) {
314                 /* This works because the BO is about to be destroyed and nobody
315                  * references it anymore. The only tricky case is the trylock on
316                  * the resv object while holding the lru_lock.
317                  */
318                 spin_lock(&ttm_bo_glob.lru_lock);
319                 bo->base.resv = &bo->base._resv;
320                 spin_unlock(&ttm_bo_glob.lru_lock);
321         }
322
323         return r;
324 }
325
326 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
327 {
328         struct dma_resv *resv = &bo->base._resv;
329         struct dma_resv_list *fobj;
330         struct dma_fence *fence;
331         int i;
332
333         rcu_read_lock();
334         fobj = rcu_dereference(resv->fence);
335         fence = rcu_dereference(resv->fence_excl);
336         if (fence && !fence->ops->signaled)
337                 dma_fence_enable_sw_signaling(fence);
338
339         for (i = 0; fobj && i < fobj->shared_count; ++i) {
340                 fence = rcu_dereference(fobj->shared[i]);
341
342                 if (!fence->ops->signaled)
343                         dma_fence_enable_sw_signaling(fence);
344         }
345         rcu_read_unlock();
346 }
347
348 /**
349  * ttm_bo_cleanup_refs - clean up a buffer object pending destruction
350  * If bo idle, remove from lru lists, and unref.
351  * If not idle, block if possible.
352  *
353  * Must be called with lru_lock and reservation held, this function
354  * will drop the lru lock and optionally the reservation lock before returning.
355  *
356  * @bo:                    The buffer object to clean-up
357  * @interruptible:         Any sleeps should occur interruptibly.
358  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
359  * @unlock_resv:           Unlock the reservation lock as well.
360  */
361
362 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
363                                bool interruptible, bool no_wait_gpu,
364                                bool unlock_resv)
365 {
366         struct dma_resv *resv = &bo->base._resv;
367         int ret;
368
369         if (dma_resv_test_signaled_rcu(resv, true))
370                 ret = 0;
371         else
372                 ret = -EBUSY;
373
374         if (ret && !no_wait_gpu) {
375                 long lret;
376
377                 if (unlock_resv)
378                         dma_resv_unlock(bo->base.resv);
379                 spin_unlock(&ttm_bo_glob.lru_lock);
380
381                 lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
382                                                  30 * HZ);
383
384                 if (lret < 0)
385                         return lret;
386                 else if (lret == 0)
387                         return -EBUSY;
388
389                 spin_lock(&ttm_bo_glob.lru_lock);
390                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
391                         /*
392                          * We raced and lost: someone else holds the reservation now,
393                          * and is probably busy in ttm_bo_cleanup_memtype_use.
394                          *
395                          * Even if that's not the case, any delayed destruction would
396                          * now succeed because we finished waiting, so just return
397                          * success here.
398                          */
399                         spin_unlock(&ttm_bo_glob.lru_lock);
400                         return 0;
401                 }
402                 ret = 0;
403         }
404
405         if (ret || unlikely(list_empty(&bo->ddestroy))) {
406                 if (unlock_resv)
407                         dma_resv_unlock(bo->base.resv);
408                 spin_unlock(&ttm_bo_glob.lru_lock);
409                 return ret;
410         }
411
412         ttm_bo_del_from_lru(bo);
413         list_del_init(&bo->ddestroy);
414         spin_unlock(&ttm_bo_glob.lru_lock);
415         ttm_bo_cleanup_memtype_use(bo);
416
417         if (unlock_resv)
418                 dma_resv_unlock(bo->base.resv);
419
420         ttm_bo_put(bo);
421
422         return 0;
423 }
424
425 /*
426  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
427  * encountered buffers.
428  */
429 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
430 {
431         struct ttm_bo_global *glob = &ttm_bo_glob;
432         struct list_head removed;
433         bool empty;
434
435         INIT_LIST_HEAD(&removed);
436
437         spin_lock(&glob->lru_lock);
438         while (!list_empty(&bdev->ddestroy)) {
439                 struct ttm_buffer_object *bo;
440
441                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
442                                       ddestroy);
443                 list_move_tail(&bo->ddestroy, &removed);
444                 if (!ttm_bo_get_unless_zero(bo))
445                         continue;
446
447                 if (remove_all || bo->base.resv != &bo->base._resv) {
448                         spin_unlock(&glob->lru_lock);
449                         dma_resv_lock(bo->base.resv, NULL);
450
451                         spin_lock(&glob->lru_lock);
452                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
453
454                 } else if (dma_resv_trylock(bo->base.resv)) {
455                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
456                 } else {
457                         spin_unlock(&glob->lru_lock);
458                 }
459
460                 ttm_bo_put(bo);
461                 spin_lock(&glob->lru_lock);
462         }
463         list_splice_tail(&removed, &bdev->ddestroy);
464         empty = list_empty(&bdev->ddestroy);
465         spin_unlock(&glob->lru_lock);
466
467         return empty;
468 }
469
470 static void ttm_bo_delayed_workqueue(struct work_struct *work)
471 {
472         struct ttm_bo_device *bdev =
473             container_of(work, struct ttm_bo_device, wq.work);
474
475         if (!ttm_bo_delayed_delete(bdev, false))
476                 schedule_delayed_work(&bdev->wq,
477                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
478 }
479
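    /* Last-reference release handler for bo->kref: detach the BO from its
     * mmap offset and io mappings, individualize its reservation object
     * and, if the BO is still busy (or its resv can't be trylocked),
     * resurrect it with a fresh reference on the delayed-destroy list.
     * Otherwise destroy it immediately.
     */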
480 static void ttm_bo_release(struct kref *kref)
481 {
482         struct ttm_buffer_object *bo =
483             container_of(kref, struct ttm_buffer_object, kref);
484         struct ttm_bo_device *bdev = bo->bdev;
485         size_t acc_size = bo->acc_size;
486         int ret;
487
488         if (!bo->deleted) {
489                 ret = ttm_bo_individualize_resv(bo);
490                 if (ret) {
491                         /* Last resort: if we fail to allocate memory for the
492                          * fences, block until the BO becomes idle
493                          */
494                         dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
495                                                   30 * HZ);
496                 }
497
498                 if (bo->bdev->driver->release_notify)
499                         bo->bdev->driver->release_notify(bo);
500
501                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
502                 ttm_mem_io_free(bdev, &bo->mem);
503         }
504
505         if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
506             !dma_resv_trylock(bo->base.resv)) {
507                 /* The BO is not idle, resurrect it for delayed destroy */
508                 ttm_bo_flush_all_fences(bo);
509                 bo->deleted = true;
510
511                 spin_lock(&ttm_bo_glob.lru_lock);
512
513                 /*
514                  * Make pinned bos immediately available to
515                  * shrinkers, now that they are queued for
516                  * destruction.
517                  */
518                 if (WARN_ON(bo->pin_count)) {
519                         bo->pin_count = 0;
520                         ttm_bo_del_from_lru(bo);
521                         ttm_bo_add_mem_to_lru(bo, &bo->mem);
522                 }
523
524                 kref_init(&bo->kref);
525                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
526                 spin_unlock(&ttm_bo_glob.lru_lock);
527
528                 schedule_delayed_work(&bdev->wq,
529                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
530                 return;
531         }
532
533         spin_lock(&ttm_bo_glob.lru_lock);
534         ttm_bo_del_from_lru(bo);
535         list_del(&bo->ddestroy);
536         spin_unlock(&ttm_bo_glob.lru_lock);
537
538         ttm_bo_cleanup_memtype_use(bo);
539         dma_resv_unlock(bo->base.resv);
540
541         atomic_dec(&ttm_bo_glob.bo_count);
542         dma_fence_put(bo->moving);
543         if (!ttm_bo_uses_embedded_gem_object(bo))
544                 dma_resv_fini(&bo->base._resv);
545         bo->destroy(bo);
546         ttm_mem_global_free(&ttm_mem_glob, acc_size);
547 }
548
549 void ttm_bo_put(struct ttm_buffer_object *bo)
550 {
551         kref_put(&bo->kref, ttm_bo_release);
552 }
553 EXPORT_SYMBOL(ttm_bo_put);
554
555 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
556 {
557         return cancel_delayed_work_sync(&bdev->wq);
558 }
559 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
560
561 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
562 {
563         if (resched)
564                 schedule_delayed_work(&bdev->wq,
565                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
566 }
567 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
568
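    /* Evict @bo from its current placement: ask the driver for its eviction
     * placements, find space with ttm_bo_mem_space() and move the buffer
     * there. An empty eviction placement means the backing store is simply
     * dropped (wait for idle, free the resource, recreate an empty ttm).
     */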
569 static int ttm_bo_evict(struct ttm_buffer_object *bo,
570                         struct ttm_operation_ctx *ctx)
571 {
572         struct ttm_bo_device *bdev = bo->bdev;
573         struct ttm_resource evict_mem;
574         struct ttm_placement placement;
575         struct ttm_place hop;
576         int ret = 0;
577
578         memset(&hop, 0, sizeof(hop));
579
580         dma_resv_assert_held(bo->base.resv);
581
582         placement.num_placement = 0;
583         placement.num_busy_placement = 0;
584         bdev->driver->evict_flags(bo, &placement);
585
586         if (!placement.num_placement && !placement.num_busy_placement) {
587                 ttm_bo_wait(bo, false, false);
588
589                 ttm_bo_cleanup_memtype_use(bo);
590                 return ttm_tt_create(bo, false);
591         }
592
593         evict_mem = bo->mem;
594         evict_mem.mm_node = NULL;
595         evict_mem.bus.offset = 0;
596         evict_mem.bus.addr = NULL;
597
598         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
599         if (ret) {
600                 if (ret != -ERESTARTSYS) {
601                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
602                                bo);
603                         ttm_bo_mem_space_debug(bo, &placement);
604                 }
605                 goto out;
606         }
607
608         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
609         if (unlikely(ret)) {
610                 WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
611                 if (ret != -ERESTARTSYS)
612                         pr_err("Buffer eviction failed\n");
613                 ttm_resource_free(bo, &evict_mem);
614         }
615 out:
616         return ret;
617 }
618
619 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
620                               const struct ttm_place *place)
621 {
622         /* Don't evict this BO if it's outside of the
623          * requested placement range
624          */
625         if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
626             (place->lpfn && place->lpfn <= bo->mem.start))
627                 return false;
628
629         return true;
630 }
631 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
632
633 /*
634  * Check whether the target bo may be evicted or swapped out, covering two cases:
635  *
636  * a. If it shares its reservation object with ctx->resv, the reservation
637  * is assumed to be locked already, so don't lock it again; return true
638  * directly when either the operation allows reserved eviction or the
639  * target bo is already on the delayed free list;
640  *
641  * b. Otherwise, trylock it.
642  */
643 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
644                         struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
645 {
646         bool ret = false;
647
648         if (bo->base.resv == ctx->resv) {
649                 dma_resv_assert_held(bo->base.resv);
650                 if (ctx->allow_res_evict)
651                         ret = true;
652                 *locked = false;
653                 if (busy)
654                         *busy = false;
655         } else {
656                 ret = dma_resv_trylock(bo->base.resv);
657                 *locked = ret;
658                 if (busy)
659                         *busy = !ret;
660         }
661
662         return ret;
663 }
664
665 /**
666  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
667  *
668  * @busy_bo: BO which couldn't be locked with trylock
669  * @ctx: operation context
670  * @ticket: acquire ticket
671  *
672  * Try to lock a busy buffer object to avoid failing eviction.
673  */
674 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
675                                    struct ttm_operation_ctx *ctx,
676                                    struct ww_acquire_ctx *ticket)
677 {
678         int r;
679
680         if (!busy_bo || !ticket)
681                 return -EBUSY;
682
683         if (ctx->interruptible)
684                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
685                                                           ticket);
686         else
687                 r = dma_resv_lock(busy_bo->base.resv, ticket);
688
689         /*
690          * TODO: It would be better to keep the BO locked until allocation is at
691          * least tried one more time, but that would mean a much larger rework
692          * of TTM.
693          */
694         if (!r)
695                 dma_resv_unlock(busy_bo->base.resv);
696
697         return r == -EDEADLK ? -EBUSY : r;
698 }
699
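    /**
     * ttm_mem_evict_first - evict the first evictable BO from a manager's LRU
     * @bdev: the buffer object device
     * @man: the resource manager to make room in
     * @place: the placement to make room for, or NULL to skip the
     *         eviction_valuable check
     * @ctx: operation context of the current allocation
     * @ticket: the caller's ww_acquire ticket, if any
     *
     * Walk the LRU lists in priority order and evict the first BO that can
     * be reserved and that the driver considers valuable to evict. If only
     * busy BOs are found, wait on one of them (see ttm_mem_evict_wait_busy())
     * so that the caller can retry its allocation.
     */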
700 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
701                         struct ttm_resource_manager *man,
702                         const struct ttm_place *place,
703                         struct ttm_operation_ctx *ctx,
704                         struct ww_acquire_ctx *ticket)
705 {
706         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
707         bool locked = false;
708         unsigned i;
709         int ret;
710
711         spin_lock(&ttm_bo_glob.lru_lock);
712         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
713                 list_for_each_entry(bo, &man->lru[i], lru) {
714                         bool busy;
715
716                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
717                                                             &busy)) {
718                                 if (busy && !busy_bo && ticket !=
719                                     dma_resv_locking_ctx(bo->base.resv))
720                                         busy_bo = bo;
721                                 continue;
722                         }
723
724                         if (place && !bdev->driver->eviction_valuable(bo,
725                                                                       place)) {
726                                 if (locked)
727                                         dma_resv_unlock(bo->base.resv);
728                                 continue;
729                         }
730                         if (!ttm_bo_get_unless_zero(bo)) {
731                                 if (locked)
732                                         dma_resv_unlock(bo->base.resv);
733                                 continue;
734                         }
735                         break;
736                 }
737
738                 /* If the inner loop terminated early, we have our candidate */
739                 if (&bo->lru != &man->lru[i])
740                         break;
741
742                 bo = NULL;
743         }
744
745         if (!bo) {
746                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
747                         busy_bo = NULL;
748                 spin_unlock(&ttm_bo_glob.lru_lock);
749                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
750                 if (busy_bo)
751                         ttm_bo_put(busy_bo);
752                 return ret;
753         }
754
755         if (bo->deleted) {
756                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
757                                           ctx->no_wait_gpu, locked);
758                 ttm_bo_put(bo);
759                 return ret;
760         }
761
762         spin_unlock(&ttm_bo_glob.lru_lock);
763
764         ret = ttm_bo_evict(bo, ctx);
765         if (locked)
766                 ttm_bo_unreserve(bo);
767
768         ttm_bo_put(bo);
769         return ret;
770 }
771
772 /*
773  * Add the last move fence to the BO and reserve a new shared slot.
774  */
775 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
776                                  struct ttm_resource_manager *man,
777                                  struct ttm_resource *mem,
778                                  bool no_wait_gpu)
779 {
780         struct dma_fence *fence;
781         int ret;
782
783         spin_lock(&man->move_lock);
784         fence = dma_fence_get(man->move);
785         spin_unlock(&man->move_lock);
786
787         if (!fence)
788                 return 0;
789
790         if (no_wait_gpu) {
791                 dma_fence_put(fence);
792                 return -EBUSY;
793         }
794
795         dma_resv_add_shared_fence(bo->base.resv, fence);
796
797         ret = dma_resv_reserve_shared(bo->base.resv, 1);
798         if (unlikely(ret)) {
799                 dma_fence_put(fence);
800                 return ret;
801         }
802
803         dma_fence_put(bo->moving);
804         bo->moving = fence;
805         return 0;
806 }
807
808 /*
809  * Repeatedly evict memory from the LRU for @mem_type until we create enough
810  * space, or we've evicted everything and there isn't enough space.
811  */
812 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
813                                   const struct ttm_place *place,
814                                   struct ttm_resource *mem,
815                                   struct ttm_operation_ctx *ctx)
816 {
817         struct ttm_bo_device *bdev = bo->bdev;
818         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
819         struct ww_acquire_ctx *ticket;
820         int ret;
821
822         ticket = dma_resv_locking_ctx(bo->base.resv);
823         do {
824                 ret = ttm_resource_alloc(bo, place, mem);
825                 if (likely(!ret))
826                         break;
827                 if (unlikely(ret != -ENOSPC))
828                         return ret;
829                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
830                                           ticket);
831                 if (unlikely(ret != 0))
832                         return ret;
833         } while (1);
834
835         return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
836 }
837
838 /**
839  * ttm_bo_mem_placement - check if placement is compatible
840  * @bo: BO to find memory for
841  * @place: where to search
842  * @mem: the memory object to fill in
843  *
844  * Check if placement is compatible and fill in mem structure.
845  * Returns 0 when the placement can be used, or -EBUSY when the
846  * placement's memory type is unusable.
847  */
848 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
849                                 const struct ttm_place *place,
850                                 struct ttm_resource *mem)
851 {
852         struct ttm_bo_device *bdev = bo->bdev;
853         struct ttm_resource_manager *man;
854
855         man = ttm_manager_type(bdev, place->mem_type);
856         if (!man || !ttm_resource_manager_used(man))
857                 return -EBUSY;
858
859         mem->mem_type = place->mem_type;
860         mem->placement = place->flags;
861
862         spin_lock(&ttm_bo_glob.lru_lock);
863         ttm_bo_del_from_lru(bo);
864         ttm_bo_add_mem_to_lru(bo, mem);
865         spin_unlock(&ttm_bo_glob.lru_lock);
866
867         return 0;
868 }
869
870 /*
871  * Creates space for memory region @mem according to its type.
872  *
873  * This function first searches for free space in compatible memory types in
874  * the priority order defined by the driver.  If free space isn't found, then
875  * ttm_bo_mem_force_space is attempted in priority order to evict and find
876  * space.
877  */
878 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
879                         struct ttm_placement *placement,
880                         struct ttm_resource *mem,
881                         struct ttm_operation_ctx *ctx)
882 {
883         struct ttm_bo_device *bdev = bo->bdev;
884         bool type_found = false;
885         int i, ret;
886
887         ret = dma_resv_reserve_shared(bo->base.resv, 1);
888         if (unlikely(ret))
889                 return ret;
890
891         for (i = 0; i < placement->num_placement; ++i) {
892                 const struct ttm_place *place = &placement->placement[i];
893                 struct ttm_resource_manager *man;
894
895                 ret = ttm_bo_mem_placement(bo, place, mem);
896                 if (ret)
897                         continue;
898
899                 type_found = true;
900                 ret = ttm_resource_alloc(bo, place, mem);
901                 if (ret == -ENOSPC)
902                         continue;
903                 if (unlikely(ret))
904                         goto error;
905
906                 man = ttm_manager_type(bdev, mem->mem_type);
907                 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
908                 if (unlikely(ret)) {
909                         ttm_resource_free(bo, mem);
910                         if (ret == -EBUSY)
911                                 continue;
912
913                         goto error;
914                 }
915                 return 0;
916         }
917
918         for (i = 0; i < placement->num_busy_placement; ++i) {
919                 const struct ttm_place *place = &placement->busy_placement[i];
920
921                 ret = ttm_bo_mem_placement(bo, place, mem);
922                 if (ret)
923                         continue;
924
925                 type_found = true;
926                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
927                 if (likely(!ret))
928                         return 0;
929
930                 if (ret && ret != -EBUSY)
931                         goto error;
932         }
933
934         ret = -ENOMEM;
935         if (!type_found) {
936                 pr_err("No compatible memory type found\n");
937                 ret = -EINVAL;
938         }
939
940 error:
941         if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
942                 ttm_bo_move_to_lru_tail_unlocked(bo);
943
944         return ret;
945 }
946 EXPORT_SYMBOL(ttm_bo_mem_space);
947
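    /* Multihop helper: allocate space in the bounce domain described by @hop
     * and move @bo there, so that the caller can retry the move to the
     * final placement afterwards.
     */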
948 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
949                                      struct ttm_resource *mem,
950                                      struct ttm_operation_ctx *ctx,
951                                      struct ttm_place *hop)
952 {
953         struct ttm_placement hop_placement;
954         int ret;
955         struct ttm_resource hop_mem = *mem;
956
957         hop_mem.mm_node = NULL;
958         hop_mem.mem_type = TTM_PL_SYSTEM;
959         hop_mem.placement = 0;
960
961         hop_placement.num_placement = hop_placement.num_busy_placement = 1;
962         hop_placement.placement = hop_placement.busy_placement = hop;
963
964         /* find space in the bounce domain */
965         ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
966         if (ret)
967                 return ret;
968         /* move to the bounce domain */
969         ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
970         if (ret)
971                 return ret;
972         return 0;
973 }
974
975 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
976                               struct ttm_placement *placement,
977                               struct ttm_operation_ctx *ctx)
978 {
979         int ret = 0;
980         struct ttm_place hop;
981         struct ttm_resource mem;
982
983         dma_resv_assert_held(bo->base.resv);
984
985         memset(&hop, 0, sizeof(hop));
986
987         mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
988         mem.page_alignment = bo->mem.page_alignment;
989         mem.bus.offset = 0;
990         mem.bus.addr = NULL;
991         mem.mm_node = NULL;
992
993         /*
994          * Determine where to move the buffer.
995          *
996          * If the driver determines that the move needs an
997          * extra step, it returns -EMULTIHOP and the buffer
998          * is first moved to the temporary stop, after which
999          * the driver is called again to make the second
1000          * hop.
1001          */
1002 bounce:
1003         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1004         if (ret)
1005                 return ret;
1006         ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
1007         if (ret == -EMULTIHOP) {
1008                 ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
1009                 if (ret)
1010                         return ret;
1011                 /* try and move to final place now. */
1012                 goto bounce;
1013         }
1014         if (ret)
1015                 ttm_resource_free(bo, &mem);
1016         return ret;
1017 }
1018
1019 static bool ttm_bo_places_compat(const struct ttm_place *places,
1020                                  unsigned num_placement,
1021                                  struct ttm_resource *mem,
1022                                  uint32_t *new_flags)
1023 {
1024         unsigned i;
1025
1026         for (i = 0; i < num_placement; i++) {
1027                 const struct ttm_place *heap = &places[i];
1028
1029                 if ((mem->start < heap->fpfn ||
1030                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1031                         continue;
1032
1033                 *new_flags = heap->flags;
1034                 if ((mem->mem_type == heap->mem_type) &&
1035                     (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1036                      (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1037                         return true;
1038         }
1039         return false;
1040 }
1041
1042 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1043                        struct ttm_resource *mem,
1044                        uint32_t *new_flags)
1045 {
1046         if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1047                                  mem, new_flags))
1048                 return true;
1049
1050         if ((placement->busy_placement != placement->placement ||
1051              placement->num_busy_placement > placement->num_placement) &&
1052             ttm_bo_places_compat(placement->busy_placement,
1053                                  placement->num_busy_placement,
1054                                  mem, new_flags))
1055                 return true;
1056
1057         return false;
1058 }
1059 EXPORT_SYMBOL(ttm_bo_mem_compat);
1060
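    /**
     * ttm_bo_validate - ensure a BO is placed according to @placement
     * @bo: the buffer object, reserved by the caller
     * @placement: the proposed placement
     * @ctx: validation parameters
     *
     * Moves the buffer when its current resource isn't compatible with
     * @placement; an empty placement instead drops the backing store via
     * ttm_bo_pipeline_gutting(). A ttm is created for system-placed BOs.
     */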
1061 int ttm_bo_validate(struct ttm_buffer_object *bo,
1062                     struct ttm_placement *placement,
1063                     struct ttm_operation_ctx *ctx)
1064 {
1065         int ret;
1066         uint32_t new_flags;
1067
1068         dma_resv_assert_held(bo->base.resv);
1069
1070         /*
1071          * Remove the backing store if no placement is given.
1072          */
1073         if (!placement->num_placement && !placement->num_busy_placement) {
1074                 ret = ttm_bo_pipeline_gutting(bo);
1075                 if (ret)
1076                         return ret;
1077
1078                 return ttm_tt_create(bo, false);
1079         }
1080
1081         /*
1082          * Check whether we need to move buffer.
1083          */
1084         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1085                 ret = ttm_bo_move_buffer(bo, placement, ctx);
1086                 if (ret)
1087                         return ret;
1088         }
1089         /*
1090          * We might need to add a TTM.
1091          */
1092         if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1093                 ret = ttm_tt_create(bo, true);
1094                 if (ret)
1095                         return ret;
1096         }
1097         return 0;
1098 }
1099 EXPORT_SYMBOL(ttm_bo_validate);
1100
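    /**
     * ttm_bo_init_reserved - initialize a pre-allocated, reserved buffer object
     *
     * Accounts @acc_size against the memory global, initializes the BO
     * fields and list heads, sets up the embedded resv/vma state when no
     * GEM object is used, reserves the BO (unless an already locked @resv
     * was passed in) and validates it against @placement. On success the
     * BO is returned reserved and on its LRU list; on failure it is
     * destroyed.
     */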
1101 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1102                          struct ttm_buffer_object *bo,
1103                          size_t size,
1104                          enum ttm_bo_type type,
1105                          struct ttm_placement *placement,
1106                          uint32_t page_alignment,
1107                          struct ttm_operation_ctx *ctx,
1108                          size_t acc_size,
1109                          struct sg_table *sg,
1110                          struct dma_resv *resv,
1111                          void (*destroy) (struct ttm_buffer_object *))
1112 {
1113         struct ttm_mem_global *mem_glob = &ttm_mem_glob;
1114         bool locked;
1115         int ret = 0;
1116
1117         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1118         if (ret) {
1119                 pr_err("Out of kernel memory\n");
1120                 if (destroy)
1121                         (*destroy)(bo);
1122                 else
1123                         kfree(bo);
1124                 return -ENOMEM;
1125         }
1126
1127         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1128
1129         kref_init(&bo->kref);
1130         INIT_LIST_HEAD(&bo->lru);
1131         INIT_LIST_HEAD(&bo->ddestroy);
1132         INIT_LIST_HEAD(&bo->swap);
1133         bo->bdev = bdev;
1134         bo->type = type;
1135         bo->mem.mem_type = TTM_PL_SYSTEM;
1136         bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1137         bo->mem.mm_node = NULL;
1138         bo->mem.page_alignment = page_alignment;
1139         bo->mem.bus.offset = 0;
1140         bo->mem.bus.addr = NULL;
1141         bo->moving = NULL;
1142         bo->mem.placement = 0;
1143         bo->acc_size = acc_size;
1144         bo->pin_count = 0;
1145         bo->sg = sg;
1146         if (resv) {
1147                 bo->base.resv = resv;
1148                 dma_resv_assert_held(bo->base.resv);
1149         } else {
1150                 bo->base.resv = &bo->base._resv;
1151         }
1152         if (!ttm_bo_uses_embedded_gem_object(bo)) {
1153                 /*
1154                  * bo.base is not initialized, so we have to setup the
1155                  * struct elements we want use regardless.
1156                  * struct elements we want to use regardless.
1157                 bo->base.size = size;
1158                 dma_resv_init(&bo->base._resv);
1159                 drm_vma_node_reset(&bo->base.vma_node);
1160         }
1161         atomic_inc(&ttm_bo_glob.bo_count);
1162
1163         /*
1164          * For ttm_bo_type_device buffers, allocate
1165          * address space from the device.
1166          */
1167         if (bo->type == ttm_bo_type_device ||
1168             bo->type == ttm_bo_type_sg)
1169                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1170                                          bo->mem.num_pages);
1171
1172         /* passed reservation objects should already be locked,
1173          * since otherwise lockdep will be angered in radeon.
1174          */
1175         if (!resv) {
1176                 locked = dma_resv_trylock(bo->base.resv);
1177                 WARN_ON(!locked);
1178         }
1179
1180         if (likely(!ret))
1181                 ret = ttm_bo_validate(bo, placement, ctx);
1182
1183         if (unlikely(ret)) {
1184                 if (!resv)
1185                         ttm_bo_unreserve(bo);
1186
1187                 ttm_bo_put(bo);
1188                 return ret;
1189         }
1190
1191         ttm_bo_move_to_lru_tail_unlocked(bo);
1192
1193         return ret;
1194 }
1195 EXPORT_SYMBOL(ttm_bo_init_reserved);
1196
1197 int ttm_bo_init(struct ttm_bo_device *bdev,
1198                 struct ttm_buffer_object *bo,
1199                 size_t size,
1200                 enum ttm_bo_type type,
1201                 struct ttm_placement *placement,
1202                 uint32_t page_alignment,
1203                 bool interruptible,
1204                 size_t acc_size,
1205                 struct sg_table *sg,
1206                 struct dma_resv *resv,
1207                 void (*destroy) (struct ttm_buffer_object *))
1208 {
1209         struct ttm_operation_ctx ctx = { interruptible, false };
1210         int ret;
1211
1212         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1213                                    page_alignment, &ctx, acc_size,
1214                                    sg, resv, destroy);
1215         if (ret)
1216                 return ret;
1217
1218         if (!resv)
1219                 ttm_bo_unreserve(bo);
1220
1221         return 0;
1222 }
1223 EXPORT_SYMBOL(ttm_bo_init);
1224
1225 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1226                            unsigned long bo_size,
1227                            unsigned struct_size)
1228 {
1229         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1230         size_t size = 0;
1231
1232         size += ttm_round_pot(struct_size);
1233         size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1234         size += ttm_round_pot(sizeof(struct ttm_tt));
1235         return size;
1236 }
1237 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1238
1239 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1240 {
1241         struct ttm_bo_global *glob =
1242                 container_of(kobj, struct ttm_bo_global, kobj);
1243
1244         __free_page(glob->dummy_read_page);
1245 }
1246
1247 static void ttm_bo_global_release(void)
1248 {
1249         struct ttm_bo_global *glob = &ttm_bo_glob;
1250
1251         mutex_lock(&ttm_global_mutex);
1252         if (--ttm_bo_glob_use_count > 0)
1253                 goto out;
1254
1255         kobject_del(&glob->kobj);
1256         kobject_put(&glob->kobj);
1257         ttm_mem_global_release(&ttm_mem_glob);
1258         memset(glob, 0, sizeof(*glob));
1259 out:
1260         mutex_unlock(&ttm_global_mutex);
1261 }
1262
1263 static int ttm_bo_global_init(void)
1264 {
1265         struct ttm_bo_global *glob = &ttm_bo_glob;
1266         int ret = 0;
1267         unsigned i;
1268
1269         mutex_lock(&ttm_global_mutex);
1270         if (++ttm_bo_glob_use_count > 1)
1271                 goto out;
1272
1273         ret = ttm_mem_global_init(&ttm_mem_glob);
1274         if (ret)
1275                 goto out;
1276
1277         spin_lock_init(&glob->lru_lock);
1278         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1279
1280         if (unlikely(glob->dummy_read_page == NULL)) {
1281                 ret = -ENOMEM;
1282                 goto out;
1283         }
1284
1285         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1286                 INIT_LIST_HEAD(&glob->swap_lru[i]);
1287         INIT_LIST_HEAD(&glob->device_list);
1288         atomic_set(&glob->bo_count, 0);
1289
1290         ret = kobject_init_and_add(
1291                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1292         if (unlikely(ret != 0))
1293                 kobject_put(&glob->kobj);
1294 out:
1295         mutex_unlock(&ttm_global_mutex);
1296         return ret;
1297 }
1298
1299 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1300 {
1301         struct ttm_bo_global *glob = &ttm_bo_glob;
1302         int ret = 0;
1303         unsigned i;
1304         struct ttm_resource_manager *man;
1305
1306         man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1307         ttm_resource_manager_set_used(man, false);
1308         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1309
1310         mutex_lock(&ttm_global_mutex);
1311         list_del(&bdev->device_list);
1312         mutex_unlock(&ttm_global_mutex);
1313
1314         cancel_delayed_work_sync(&bdev->wq);
1315
1316         if (ttm_bo_delayed_delete(bdev, true))
1317                 pr_debug("Delayed destroy list was clean\n");
1318
1319         spin_lock(&glob->lru_lock);
1320         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1321                 if (list_empty(&man->lru[i]))
1322                         pr_debug("Swap list %d was clean\n", i);
1323         spin_unlock(&glob->lru_lock);
1324
1325         ttm_pool_fini(&bdev->pool);
1326
1327         if (!ret)
1328                 ttm_bo_global_release();
1329
1330         return ret;
1331 }
1332 EXPORT_SYMBOL(ttm_bo_device_release);
1333
1334 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1335 {
1336         struct ttm_resource_manager *man = &bdev->sysman;
1337
1338         /*
1339          * Initialize the system memory buffer type.
1340          * Other types need to be driver / IOCTL initialized.
1341          */
1342         man->use_tt = true;
1343
1344         ttm_resource_manager_init(man, 0);
1345         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1346         ttm_resource_manager_set_used(man, true);
1347 }
1348
1349 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1350                        struct ttm_bo_driver *driver,
1351                        struct device *dev,
1352                        struct address_space *mapping,
1353                        struct drm_vma_offset_manager *vma_manager,
1354                        bool use_dma_alloc, bool use_dma32)
1355 {
1356         struct ttm_bo_global *glob = &ttm_bo_glob;
1357         int ret;
1358
1359         if (WARN_ON(vma_manager == NULL))
1360                 return -EINVAL;
1361
1362         ret = ttm_bo_global_init();
1363         if (ret)
1364                 return ret;
1365
1366         bdev->driver = driver;
1367
1368         ttm_bo_init_sysman(bdev);
1369         ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
1370
1371         bdev->vma_manager = vma_manager;
1372         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1373         INIT_LIST_HEAD(&bdev->ddestroy);
1374         bdev->dev_mapping = mapping;
1375         mutex_lock(&ttm_global_mutex);
1376         list_add_tail(&bdev->device_list, &glob->device_list);
1377         mutex_unlock(&ttm_global_mutex);
1378
1379         return 0;
1380 }
1381 EXPORT_SYMBOL(ttm_bo_device_init);
1382
1383 /*
1384  * buffer object vm functions.
1385  */
1386
1387 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1388 {
1389         struct ttm_bo_device *bdev = bo->bdev;
1390
1391         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1392         ttm_mem_io_free(bdev, &bo->mem);
1393 }
1394 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1395
1396 int ttm_bo_wait(struct ttm_buffer_object *bo,
1397                 bool interruptible, bool no_wait)
1398 {
1399         long timeout = 15 * HZ;
1400
1401         if (no_wait) {
1402                 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1403                         return 0;
1404                 else
1405                         return -EBUSY;
1406         }
1407
1408         timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1409                                                       interruptible, timeout);
1410         if (timeout < 0)
1411                 return timeout;
1412
1413         if (timeout == 0)
1414                 return -EBUSY;
1415
1416         dma_resv_add_excl_fence(bo->base.resv, NULL);
1417         return 0;
1418 }
1419 EXPORT_SYMBOL(ttm_bo_wait);
1420
1421 /*
1422  * A buffer object shrink method that tries to swap out the first
1423  * buffer object on the bo_global::swap_lru list.
1424  */
1425 int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
1426 {
1427         struct ttm_bo_global *glob = &ttm_bo_glob;
1428         struct ttm_buffer_object *bo;
1429         int ret = -EBUSY;
1430         bool locked;
1431         unsigned i;
1432
1433         spin_lock(&glob->lru_lock);
1434         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1435                 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1436                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1437                                                             NULL))
1438                                 continue;
1439
1440                         if (!ttm_bo_get_unless_zero(bo)) {
1441                                 if (locked)
1442                                         dma_resv_unlock(bo->base.resv);
1443                                 continue;
1444                         }
1445
1446                         ret = 0;
1447                         break;
1448                 }
1449                 if (!ret)
1450                         break;
1451         }
1452
1453         if (ret) {
1454                 spin_unlock(&glob->lru_lock);
1455                 return ret;
1456         }
1457
1458         if (bo->deleted) {
1459                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1460                 ttm_bo_put(bo);
1461                 return ret;
1462         }
1463
1464         ttm_bo_del_from_lru(bo);
1465         spin_unlock(&glob->lru_lock);
1466
1467         /**
1468          * Move to system cached
1469          */
1470
1471         if (bo->mem.mem_type != TTM_PL_SYSTEM) {
1472                 struct ttm_operation_ctx ctx = { false, false };
1473                 struct ttm_resource evict_mem;
1474                 struct ttm_place hop;
1475
1476                 memset(&hop, 0, sizeof(hop));
1477
1478                 evict_mem = bo->mem;
1479                 evict_mem.mm_node = NULL;
1480                 evict_mem.placement = 0;
1481                 evict_mem.mem_type = TTM_PL_SYSTEM;
1482
1483                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
1484                 if (unlikely(ret != 0)) {
1485                         WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1486                         goto out;
1487                 }
1488         }
1489
1490         /**
1491          * Make sure BO is idle.
1492          */
1493
1494         ret = ttm_bo_wait(bo, false, false);
1495         if (unlikely(ret != 0))
1496                 goto out;
1497
1498         ttm_bo_unmap_virtual(bo);
1499
1500         /**
1501          * Swap out. Buffer will be swapped in again as soon as
1502          * anyone tries to access a ttm page.
1503          */
1504
1505         if (bo->bdev->driver->swap_notify)
1506                 bo->bdev->driver->swap_notify(bo);
1507
1508         ret = ttm_tt_swapout(bo->bdev, bo->ttm);
1509 out:
1510
1511         /**
1512          *
1513          * Unreserve without putting on LRU to avoid swapping out an
1514          * already swapped buffer.
1515          */
1516         if (locked)
1517                 dma_resv_unlock(bo->base.resv);
1518         ttm_bo_put(bo);
1519         return ret;
1520 }
1521 EXPORT_SYMBOL(ttm_bo_swapout);
1522
1523 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1524 {
1525         if (bo->ttm == NULL)
1526                 return;
1527
1528         ttm_tt_destroy(bo->bdev, bo->ttm);
1529         bo->ttm = NULL;
1530 }
1531