1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #define pr_fmt(fmt) "[TTM] " fmt
33
34 #include <drm/ttm/ttm_module.h>
35 #include <drm/ttm/ttm_bo_driver.h>
36 #include <drm/ttm/ttm_placement.h>
37 #include <linux/jiffies.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
40 #include <linux/mm.h>
41 #include <linux/file.h>
42 #include <linux/module.h>
43 #include <linux/atomic.h>
44 #include <linux/dma-resv.h>
45
46 static void ttm_bo_global_kobj_release(struct kobject *kobj);
47
48 /**
49  * ttm_global_mutex - protecting the global BO state
50  */
51 DEFINE_MUTEX(ttm_global_mutex);
52 unsigned ttm_bo_glob_use_count;
53 struct ttm_bo_global ttm_bo_glob;
54 EXPORT_SYMBOL(ttm_bo_glob);
55
56 static struct attribute ttm_bo_count = {
57         .name = "bo_count",
58         .mode = S_IRUGO
59 };
60
61 /* default destructor */
62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
63 {
64         kfree(bo);
65 }
66
67 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
68                                           uint32_t *mem_type)
69 {
70         int pos;
71
72         pos = ffs(place->flags & TTM_PL_MASK_MEM);
73         if (unlikely(!pos))
74                 return -EINVAL;
75
76         *mem_type = pos - 1;
77         return 0;
78 }
79
80 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
81                                         struct ttm_placement *placement)
82 {
83         struct drm_printer p = drm_debug_printer(TTM_PFX);
84         int i, ret, mem_type;
85         struct ttm_resource_manager *man;
86
87         drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
88                    bo, bo->mem.num_pages, bo->mem.size >> 10,
89                    bo->mem.size >> 20);
90         for (i = 0; i < placement->num_placement; i++) {
91                 ret = ttm_mem_type_from_place(&placement->placement[i],
92                                                 &mem_type);
93                 if (ret)
94                         return;
95                 drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
96                            i, placement->placement[i].flags, mem_type);
97                 man = ttm_manager_type(bo->bdev, mem_type);
98                 ttm_resource_manager_debug(man, &p);
99         }
100 }
101
102 static ssize_t ttm_bo_global_show(struct kobject *kobj,
103                                   struct attribute *attr,
104                                   char *buffer)
105 {
106         struct ttm_bo_global *glob =
107                 container_of(kobj, struct ttm_bo_global, kobj);
108
109         return snprintf(buffer, PAGE_SIZE, "%d\n",
110                                 atomic_read(&glob->bo_count));
111 }
112
113 static struct attribute *ttm_bo_global_attrs[] = {
114         &ttm_bo_count,
115         NULL
116 };
117
118 static const struct sysfs_ops ttm_bo_global_ops = {
119         .show = &ttm_bo_global_show
120 };
121
122 static struct kobj_type ttm_bo_glob_kobj_type  = {
123         .release = &ttm_bo_global_kobj_release,
124         .sysfs_ops = &ttm_bo_global_ops,
125         .default_attrs = ttm_bo_global_attrs
126 };
127
128
129 static inline uint32_t ttm_bo_type_flags(unsigned type)
130 {
131         return 1 << (type);
132 }
133
134 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
135                                   struct ttm_resource *mem)
136 {
137         struct ttm_bo_device *bdev = bo->bdev;
138         struct ttm_resource_manager *man;
139
140         if (!list_empty(&bo->lru))
141                 return;
142
143         if (mem->placement & TTM_PL_FLAG_NO_EVICT)
144                 return;
145
146         man = ttm_manager_type(bdev, mem->mem_type);
147         list_add_tail(&bo->lru, &man->lru[bo->priority]);
148
149         if (man->use_tt && bo->ttm &&
150             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
151                                      TTM_PAGE_FLAG_SWAPPED))) {
152                 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
153         }
154 }
155
156 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
157 {
158         struct ttm_bo_device *bdev = bo->bdev;
159         bool notify = false;
160
161         if (!list_empty(&bo->swap)) {
162                 list_del_init(&bo->swap);
163                 notify = true;
164         }
165         if (!list_empty(&bo->lru)) {
166                 list_del_init(&bo->lru);
167                 notify = true;
168         }
169
170         if (notify && bdev->driver->del_from_lru_notify)
171                 bdev->driver->del_from_lru_notify(bo);
172 }
173
174 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
175                                      struct ttm_buffer_object *bo)
176 {
177         if (!pos->first)
178                 pos->first = bo;
179         pos->last = bo;
180 }
181
182 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
183                              struct ttm_lru_bulk_move *bulk)
184 {
185         dma_resv_assert_held(bo->base.resv);
186
187         ttm_bo_del_from_lru(bo);
188         ttm_bo_add_mem_to_lru(bo, &bo->mem);
189
190         if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
191                 switch (bo->mem.mem_type) {
192                 case TTM_PL_TT:
193                         ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
194                         break;
195
196                 case TTM_PL_VRAM:
197                         ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
198                         break;
199                 }
200                 if (bo->ttm && !(bo->ttm->page_flags &
201                                  (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
202                         ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
203         }
204 }
205 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
206
207 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
208 {
209         unsigned i;
210
211         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
212                 struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
213                 struct ttm_resource_manager *man;
214
215                 if (!pos->first)
216                         continue;
217
218                 dma_resv_assert_held(pos->first->base.resv);
219                 dma_resv_assert_held(pos->last->base.resv);
220
221                 man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
222                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
223                                     &pos->last->lru);
224         }
225
226         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
227                 struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
228                 struct ttm_resource_manager *man;
229
230                 if (!pos->first)
231                         continue;
232
233                 dma_resv_assert_held(pos->first->base.resv);
234                 dma_resv_assert_held(pos->last->base.resv);
235
236                 man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
237                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
238                                     &pos->last->lru);
239         }
240
241         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
242                 struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
243                 struct list_head *lru;
244
245                 if (!pos->first)
246                         continue;
247
248                 dma_resv_assert_held(pos->first->base.resv);
249                 dma_resv_assert_held(pos->last->base.resv);
250
251                 lru = &ttm_bo_glob.swap_lru[i];
252                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
253         }
254 }
255 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
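
/*
 * Illustrative sketch (not part of this file): how a driver might use the
 * bulk LRU move API above for a group of BOs it has already reserved, similar
 * to what amdgpu does for its per-VM BOs. The example_* name is hypothetical.
 *
 *      static void example_bulk_move_to_lru_tail(struct ttm_buffer_object **bos,
 *                                                unsigned int count)
 *      {
 *              struct ttm_lru_bulk_move bulk;
 *              unsigned int i;
 *
 *              memset(&bulk, 0, sizeof(bulk));  // positions must start out NULL
 *
 *              // every bos[i] must already be reserved by the caller
 *              spin_lock(&ttm_bo_glob.lru_lock);
 *              for (i = 0; i < count; ++i)
 *                      ttm_bo_move_to_lru_tail(bos[i], &bulk);  // record first/last
 *              ttm_bo_bulk_move_lru_tail(&bulk);  // one list splice per LRU
 *              spin_unlock(&ttm_bo_glob.lru_lock);
 *      }
 */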
256
257 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
258                                   struct ttm_resource *mem, bool evict,
259                                   struct ttm_operation_ctx *ctx)
260 {
261         struct ttm_bo_device *bdev = bo->bdev;
262         struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
263         struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
264         int ret;
265
266         ret = ttm_mem_io_lock(old_man, true);
267         if (unlikely(ret != 0))
268                 goto out_err;
269         ttm_bo_unmap_virtual_locked(bo);
270         ttm_mem_io_unlock(old_man);
271
272         /*
273          * Create and bind a ttm if required.
274          */
275
276         if (new_man->use_tt) {
277                 /* Zero init the new TTM structure if the old location should
278                  * have used one as well.
279                  */
280                 ret = ttm_tt_create(bo, old_man->use_tt);
281                 if (ret)
282                         goto out_err;
283
284                 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
285                 if (ret)
286                         goto out_err;
287
288                 if (mem->mem_type != TTM_PL_SYSTEM) {
289                         ret = ttm_tt_bind(bo->ttm, mem, ctx);
290                         if (ret)
291                                 goto out_err;
292                 }
293
294                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
295                         if (bdev->driver->move_notify)
296                                 bdev->driver->move_notify(bo, evict, mem);
297                         bo->mem = *mem;
298                         goto moved;
299                 }
300         }
301
302         if (bdev->driver->move_notify)
303                 bdev->driver->move_notify(bo, evict, mem);
304
305         if (old_man->use_tt && new_man->use_tt)
306                 ret = ttm_bo_move_ttm(bo, ctx, mem);
307         else if (bdev->driver->move)
308                 ret = bdev->driver->move(bo, evict, ctx, mem);
309         else
310                 ret = ttm_bo_move_memcpy(bo, ctx, mem);
311
312         if (ret) {
313                 if (bdev->driver->move_notify) {
314                         swap(*mem, bo->mem);
315                         bdev->driver->move_notify(bo, false, mem);
316                         swap(*mem, bo->mem);
317                 }
318
319                 goto out_err;
320         }
321
322 moved:
323         bo->evicted = false;
324
325         ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
326         return 0;
327
328 out_err:
329         new_man = ttm_manager_type(bdev, bo->mem.mem_type);
330         if (!new_man->use_tt) {
331                 ttm_tt_destroy(bo->ttm);
332                 bo->ttm = NULL;
333         }
334
335         return ret;
336 }
337
338 /**
339  * Must be called with bo::base.resv held (reserved).
340  * Will release the GPU memory type usage (the memory resource and the
341  * TTM backing) on destruction.
342  * This is the place to put in driver specific hooks to release
343  * driver private resources.
344  */
345
346 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
347 {
348         if (bo->bdev->driver->move_notify)
349                 bo->bdev->driver->move_notify(bo, false, NULL);
350
351         ttm_tt_destroy(bo->ttm);
352         bo->ttm = NULL;
353         ttm_resource_free(bo, &bo->mem);
354 }
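
/*
 * Illustrative sketch (not part of this file): the "driver specific hooks"
 * mentioned above are reached through the move_notify() callback; a NULL
 * new_mem from ttm_bo_cleanup_memtype_use() signals final teardown. All
 * example_* names are hypothetical.
 *
 *      static void example_bo_move_notify(struct ttm_buffer_object *bo,
 *                                         bool evict,
 *                                         struct ttm_resource *new_mem)
 *      {
 *              if (!new_mem) {
 *                      // BO is being destroyed: drop GPU mappings and any
 *                      // driver private state tied to the old placement.
 *                      example_unmap_from_gpu_vm(bo);  // hypothetical helper
 *                      return;
 *              }
 *              // regular move: invalidate per-placement driver caches here
 *      }
 */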
355
356 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
357 {
358         int r;
359
360         if (bo->base.resv == &bo->base._resv)
361                 return 0;
362
363         BUG_ON(!dma_resv_trylock(&bo->base._resv));
364
365         r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
366         dma_resv_unlock(&bo->base._resv);
367         if (r)
368                 return r;
369
370         if (bo->type != ttm_bo_type_sg) {
371                 /* This works because the BO is about to be destroyed and nobody
372                  * references it anymore. The only tricky case is the trylock on
373                  * the resv object while holding the lru_lock.
374                  */
375                 spin_lock(&ttm_bo_glob.lru_lock);
376                 bo->base.resv = &bo->base._resv;
377                 spin_unlock(&ttm_bo_glob.lru_lock);
378         }
379
380         return r;
381 }
382
383 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
384 {
385         struct dma_resv *resv = &bo->base._resv;
386         struct dma_resv_list *fobj;
387         struct dma_fence *fence;
388         int i;
389
390         rcu_read_lock();
391         fobj = rcu_dereference(resv->fence);
392         fence = rcu_dereference(resv->fence_excl);
393         if (fence && !fence->ops->signaled)
394                 dma_fence_enable_sw_signaling(fence);
395
396         for (i = 0; fobj && i < fobj->shared_count; ++i) {
397                 fence = rcu_dereference(fobj->shared[i]);
398
399                 if (!fence->ops->signaled)
400                         dma_fence_enable_sw_signaling(fence);
401         }
402         rcu_read_unlock();
403 }
404
405 /**
406  * ttm_bo_cleanup_refs - clean up a delayed-destroy buffer object
407  * @bo: the buffer object to clean up
408  * @interruptible: any sleeps should occur interruptibly
409  * @no_wait_gpu: never wait for the GPU, return -EBUSY instead
410  * @unlock_resv: also unlock the reservation lock before returning
411  *
412  * If the BO is idle, remove it from the LRU lists and drop the reference;
413  * otherwise block until it becomes idle, if possible. Must be called with
414  * the lru_lock and the reservation held; this function drops the lru lock
415  * and optionally the reservation lock before returning.
416  */
417
418 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
419                                bool interruptible, bool no_wait_gpu,
420                                bool unlock_resv)
421 {
422         struct dma_resv *resv = &bo->base._resv;
423         int ret;
424
425         if (dma_resv_test_signaled_rcu(resv, true))
426                 ret = 0;
427         else
428                 ret = -EBUSY;
429
430         if (ret && !no_wait_gpu) {
431                 long lret;
432
433                 if (unlock_resv)
434                         dma_resv_unlock(bo->base.resv);
435                 spin_unlock(&ttm_bo_glob.lru_lock);
436
437                 lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
438                                                  30 * HZ);
439
440                 if (lret < 0)
441                         return lret;
442                 else if (lret == 0)
443                         return -EBUSY;
444
445                 spin_lock(&ttm_bo_glob.lru_lock);
446                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
447                         /*
448                          * We raced, and lost, someone else holds the reservation now,
449                          * and is probably busy in ttm_bo_cleanup_memtype_use.
450                          *
451                          * Even if it's not the case, because we finished waiting any
452                          * delayed destruction would succeed, so just return success
453                          * here.
454                          */
455                         spin_unlock(&ttm_bo_glob.lru_lock);
456                         return 0;
457                 }
458                 ret = 0;
459         }
460
461         if (ret || unlikely(list_empty(&bo->ddestroy))) {
462                 if (unlock_resv)
463                         dma_resv_unlock(bo->base.resv);
464                 spin_unlock(&ttm_bo_glob.lru_lock);
465                 return ret;
466         }
467
468         ttm_bo_del_from_lru(bo);
469         list_del_init(&bo->ddestroy);
470         spin_unlock(&ttm_bo_glob.lru_lock);
471         ttm_bo_cleanup_memtype_use(bo);
472
473         if (unlock_resv)
474                 dma_resv_unlock(bo->base.resv);
475
476         ttm_bo_put(bo);
477
478         return 0;
479 }
480
481 /**
482  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
483  * encountered buffers.
484  */
485 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
486 {
487         struct ttm_bo_global *glob = &ttm_bo_glob;
488         struct list_head removed;
489         bool empty;
490
491         INIT_LIST_HEAD(&removed);
492
493         spin_lock(&glob->lru_lock);
494         while (!list_empty(&bdev->ddestroy)) {
495                 struct ttm_buffer_object *bo;
496
497                 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
498                                       ddestroy);
499                 list_move_tail(&bo->ddestroy, &removed);
500                 if (!ttm_bo_get_unless_zero(bo))
501                         continue;
502
503                 if (remove_all || bo->base.resv != &bo->base._resv) {
504                         spin_unlock(&glob->lru_lock);
505                         dma_resv_lock(bo->base.resv, NULL);
506
507                         spin_lock(&glob->lru_lock);
508                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
509
510                 } else if (dma_resv_trylock(bo->base.resv)) {
511                         ttm_bo_cleanup_refs(bo, false, !remove_all, true);
512                 } else {
513                         spin_unlock(&glob->lru_lock);
514                 }
515
516                 ttm_bo_put(bo);
517                 spin_lock(&glob->lru_lock);
518         }
519         list_splice_tail(&removed, &bdev->ddestroy);
520         empty = list_empty(&bdev->ddestroy);
521         spin_unlock(&glob->lru_lock);
522
523         return empty;
524 }
525
526 static void ttm_bo_delayed_workqueue(struct work_struct *work)
527 {
528         struct ttm_bo_device *bdev =
529             container_of(work, struct ttm_bo_device, wq.work);
530
531         if (!ttm_bo_delayed_delete(bdev, false))
532                 schedule_delayed_work(&bdev->wq,
533                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
534 }
535
536 static void ttm_bo_release(struct kref *kref)
537 {
538         struct ttm_buffer_object *bo =
539             container_of(kref, struct ttm_buffer_object, kref);
540         struct ttm_bo_device *bdev = bo->bdev;
541         struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
542         size_t acc_size = bo->acc_size;
543         int ret;
544
545         if (!bo->deleted) {
546                 ret = ttm_bo_individualize_resv(bo);
547                 if (ret) {
548                         /* Last resort: if we fail to allocate memory for the
549                          * fences, block until the BO becomes idle.
550                          */
551                         dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
552                                                   30 * HZ);
553                 }
554
555                 if (bo->bdev->driver->release_notify)
556                         bo->bdev->driver->release_notify(bo);
557
558                 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
559                 ttm_mem_io_lock(man, false);
560                 ttm_mem_io_free_vm(bo);
561                 ttm_mem_io_unlock(man);
562         }
563
564         if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
565             !dma_resv_trylock(bo->base.resv)) {
566                 /* The BO is not idle, resurrect it for delayed destroy */
567                 ttm_bo_flush_all_fences(bo);
568                 bo->deleted = true;
569
570                 spin_lock(&ttm_bo_glob.lru_lock);
571
572                 /*
573                  * Make NO_EVICT bos immediately available to
574                  * shrinkers, now that they are queued for
575                  * destruction.
576                  */
577                 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
578                         bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
579                         ttm_bo_del_from_lru(bo);
580                         ttm_bo_add_mem_to_lru(bo, &bo->mem);
581                 }
582
583                 kref_init(&bo->kref);
584                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
585                 spin_unlock(&ttm_bo_glob.lru_lock);
586
587                 schedule_delayed_work(&bdev->wq,
588                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
589                 return;
590         }
591
592         spin_lock(&ttm_bo_glob.lru_lock);
593         ttm_bo_del_from_lru(bo);
594         list_del(&bo->ddestroy);
595         spin_unlock(&ttm_bo_glob.lru_lock);
596
597         ttm_bo_cleanup_memtype_use(bo);
598         dma_resv_unlock(bo->base.resv);
599
600         atomic_dec(&ttm_bo_glob.bo_count);
601         dma_fence_put(bo->moving);
602         if (!ttm_bo_uses_embedded_gem_object(bo))
603                 dma_resv_fini(&bo->base._resv);
604         bo->destroy(bo);
605         ttm_mem_global_free(&ttm_mem_glob, acc_size);
606 }
607
608 void ttm_bo_put(struct ttm_buffer_object *bo)
609 {
610         kref_put(&bo->kref, ttm_bo_release);
611 }
612 EXPORT_SYMBOL(ttm_bo_put);
613
614 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
615 {
616         return cancel_delayed_work_sync(&bdev->wq);
617 }
618 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
619
620 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
621 {
622         if (resched)
623                 schedule_delayed_work(&bdev->wq,
624                                       ((HZ / 100) < 1) ? 1 : HZ / 100);
625 }
626 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
627
628 static int ttm_bo_evict(struct ttm_buffer_object *bo,
629                         struct ttm_operation_ctx *ctx)
630 {
631         struct ttm_bo_device *bdev = bo->bdev;
632         struct ttm_resource evict_mem;
633         struct ttm_placement placement;
634         int ret = 0;
635
636         dma_resv_assert_held(bo->base.resv);
637
638         placement.num_placement = 0;
639         placement.num_busy_placement = 0;
640         bdev->driver->evict_flags(bo, &placement);
641
642         if (!placement.num_placement && !placement.num_busy_placement) {
643                 ttm_bo_wait(bo, false, false);
644
645                 ttm_bo_cleanup_memtype_use(bo);
646                 return ttm_tt_create(bo, false);
647         }
648
649         evict_mem = bo->mem;
650         evict_mem.mm_node = NULL;
651         evict_mem.bus.io_reserved_vm = false;
652         evict_mem.bus.io_reserved_count = 0;
653
654         ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
655         if (ret) {
656                 if (ret != -ERESTARTSYS) {
657                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
658                                bo);
659                         ttm_bo_mem_space_debug(bo, &placement);
660                 }
661                 goto out;
662         }
663
664         ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
665         if (unlikely(ret)) {
666                 if (ret != -ERESTARTSYS)
667                         pr_err("Buffer eviction failed\n");
668                 ttm_resource_free(bo, &evict_mem);
669                 goto out;
670         }
671         bo->evicted = true;
672 out:
673         return ret;
674 }
675
676 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
677                               const struct ttm_place *place)
678 {
679         /* Don't evict this BO if it's outside of the
680          * requested placement range
681          */
682         if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
683             (place->lpfn && place->lpfn <= bo->mem.start))
684                 return false;
685
686         return true;
687 }
688 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
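
/*
 * Illustrative sketch (not part of this file): drivers typically wrap the
 * default check above in their own eviction_valuable callback, e.g. to keep
 * pinned scanout buffers resident. The example_* names are hypothetical.
 *
 *      static bool example_eviction_valuable(struct ttm_buffer_object *bo,
 *                                            const struct ttm_place *place)
 *      {
 *              if (example_bo_is_pinned(bo))  // hypothetical driver helper
 *                      return false;
 *
 *              // fall back to the generic placement-range check above
 *              return ttm_bo_eviction_valuable(bo, place);
 *      }
 *
 * The callback is then wired up as .eviction_valuable in the driver's
 * struct ttm_bo_driver.
 */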
689
690 /**
691  * Check whether the target BO is allowed to be evicted or swapped out:
692  *
693  * a. If the BO shares its reservation object with ctx->resv, that
694  * reservation is assumed to be held already, so it is not locked again;
695  * return true directly when the operation allows reserved eviction
696  * (TTM_OPT_FLAG_ALLOW_RES_EVICT).
697  *
698  * b. Otherwise, trylock it.
699  */
700 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
701                         struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
702 {
703         bool ret = false;
704
705         if (bo->base.resv == ctx->resv) {
706                 dma_resv_assert_held(bo->base.resv);
707                 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
708                         ret = true;
709                 *locked = false;
710                 if (busy)
711                         *busy = false;
712         } else {
713                 ret = dma_resv_trylock(bo->base.resv);
714                 *locked = ret;
715                 if (busy)
716                         *busy = !ret;
717         }
718
719         return ret;
720 }
721
722 /**
723  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
724  *
725  * @busy_bo: BO which couldn't be locked with trylock
726  * @ctx: operation context
727  * @ticket: acquire ticket
728  *
729  * Try to lock a busy buffer object to avoid failing eviction.
730  */
731 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
732                                    struct ttm_operation_ctx *ctx,
733                                    struct ww_acquire_ctx *ticket)
734 {
735         int r;
736
737         if (!busy_bo || !ticket)
738                 return -EBUSY;
739
740         if (ctx->interruptible)
741                 r = dma_resv_lock_interruptible(busy_bo->base.resv,
742                                                           ticket);
743         else
744                 r = dma_resv_lock(busy_bo->base.resv, ticket);
745
746         /*
747          * TODO: It would be better to keep the BO locked until allocation is at
748          * least tried one more time, but that would mean a much larger rework
749          * of TTM.
750          */
751         if (!r)
752                 dma_resv_unlock(busy_bo->base.resv);
753
754         return r == -EDEADLK ? -EBUSY : r;
755 }
756
757 int ttm_mem_evict_first(struct ttm_bo_device *bdev,
758                         struct ttm_resource_manager *man,
759                         const struct ttm_place *place,
760                         struct ttm_operation_ctx *ctx,
761                         struct ww_acquire_ctx *ticket)
762 {
763         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
764         bool locked = false;
765         unsigned i;
766         int ret;
767
768         spin_lock(&ttm_bo_glob.lru_lock);
769         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
770                 list_for_each_entry(bo, &man->lru[i], lru) {
771                         bool busy;
772
773                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
774                                                             &busy)) {
775                                 if (busy && !busy_bo && ticket !=
776                                     dma_resv_locking_ctx(bo->base.resv))
777                                         busy_bo = bo;
778                                 continue;
779                         }
780
781                         if (place && !bdev->driver->eviction_valuable(bo,
782                                                                       place)) {
783                                 if (locked)
784                                         dma_resv_unlock(bo->base.resv);
785                                 continue;
786                         }
787                         if (!ttm_bo_get_unless_zero(bo)) {
788                                 if (locked)
789                                         dma_resv_unlock(bo->base.resv);
790                                 continue;
791                         }
792                         break;
793                 }
794
795                 /* If the inner loop terminated early, we have our candidate */
796                 if (&bo->lru != &man->lru[i])
797                         break;
798
799                 bo = NULL;
800         }
801
802         if (!bo) {
803                 if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
804                         busy_bo = NULL;
805                 spin_unlock(&ttm_bo_glob.lru_lock);
806                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
807                 if (busy_bo)
808                         ttm_bo_put(busy_bo);
809                 return ret;
810         }
811
812         if (bo->deleted) {
813                 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
814                                           ctx->no_wait_gpu, locked);
815                 ttm_bo_put(bo);
816                 return ret;
817         }
818
819         spin_unlock(&ttm_bo_glob.lru_lock);
820
821         ret = ttm_bo_evict(bo, ctx);
822         if (locked)
823                 ttm_bo_unreserve(bo);
824
825         ttm_bo_put(bo);
826         return ret;
827 }
828
829 /**
830  * Add the last move fence to the BO and reserve a new shared slot.
831  */
832 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
833                                  struct ttm_resource_manager *man,
834                                  struct ttm_resource *mem,
835                                  bool no_wait_gpu)
836 {
837         struct dma_fence *fence;
838         int ret;
839
840         spin_lock(&man->move_lock);
841         fence = dma_fence_get(man->move);
842         spin_unlock(&man->move_lock);
843
844         if (!fence)
845                 return 0;
846
847         if (no_wait_gpu) {
848                 dma_fence_put(fence);
849                 return -EBUSY;
850         }
851
852         dma_resv_add_shared_fence(bo->base.resv, fence);
853
854         ret = dma_resv_reserve_shared(bo->base.resv, 1);
855         if (unlikely(ret)) {
856                 dma_fence_put(fence);
857                 return ret;
858         }
859
860         dma_fence_put(bo->moving);
861         bo->moving = fence;
862         return 0;
863 }
864
865 /**
866  * Repeatedly evict memory from the LRU for @mem's memory type until we
867  * create enough space, or we've evicted everything and there isn't enough space.
868  */
869 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
870                                   const struct ttm_place *place,
871                                   struct ttm_resource *mem,
872                                   struct ttm_operation_ctx *ctx)
873 {
874         struct ttm_bo_device *bdev = bo->bdev;
875         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
876         struct ww_acquire_ctx *ticket;
877         int ret;
878
879         ticket = dma_resv_locking_ctx(bo->base.resv);
880         do {
881                 ret = ttm_resource_alloc(bo, place, mem);
882                 if (likely(!ret))
883                         break;
884                 if (unlikely(ret != -ENOSPC))
885                         return ret;
886                 ret = ttm_mem_evict_first(bdev, man, place, ctx,
887                                           ticket);
888                 if (unlikely(ret != 0))
889                         return ret;
890         } while (1);
891
892         return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
893 }
894
895 static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
896                                       uint32_t cur_placement,
897                                       uint32_t proposed_placement)
898 {
899         uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
900         uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
901
902         /**
903          * Keep current caching if possible.
904          */
905
906         if ((cur_placement & caching) != 0)
907                 result |= (cur_placement & caching);
908         else if ((man->default_caching & caching) != 0)
909                 result |= man->default_caching;
910         else if ((TTM_PL_FLAG_CACHED & caching) != 0)
911                 result |= TTM_PL_FLAG_CACHED;
912         else if ((TTM_PL_FLAG_WC & caching) != 0)
913                 result |= TTM_PL_FLAG_WC;
914         else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
915                 result |= TTM_PL_FLAG_UNCACHED;
916
917         return result;
918 }
919
920 static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man,
921                                  uint32_t mem_type,
922                                  const struct ttm_place *place,
923                                  uint32_t *masked_placement)
924 {
925         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
926
927         if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
928                 return false;
929
930         if ((place->flags & man->available_caching) == 0)
931                 return false;
932
933         cur_flags |= (place->flags & man->available_caching);
934
935         *masked_placement = cur_flags;
936         return true;
937 }
938
939 /**
940  * ttm_bo_mem_placement - check if placement is compatible
941  * @bo: BO to find memory for
942  * @place: where to search
943  * @mem: the memory object to fill in
944  * @ctx: operation context
945  *
946  * Check if the placement is compatible and fill in the mem structure.
947  * Returns 0 when the placement can be used, -EBUSY if this placement
948  * won't work, or another negative error code on failure.
949  */
950 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
951                                 const struct ttm_place *place,
952                                 struct ttm_resource *mem,
953                                 struct ttm_operation_ctx *ctx)
954 {
955         struct ttm_bo_device *bdev = bo->bdev;
956         uint32_t mem_type = TTM_PL_SYSTEM;
957         struct ttm_resource_manager *man;
958         uint32_t cur_flags = 0;
959         int ret;
960
961         ret = ttm_mem_type_from_place(place, &mem_type);
962         if (ret)
963                 return ret;
964
965         man = ttm_manager_type(bdev, mem_type);
966         if (!man || !ttm_resource_manager_used(man))
967                 return -EBUSY;
968
969         if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
970                 return -EBUSY;
971
972         cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
973         /*
974          * Use the access and other non-mapping-related flag bits from
975          * the memory placement flags to the current flags
976          */
977         ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
978
979         mem->mem_type = mem_type;
980         mem->placement = cur_flags;
981
982         spin_lock(&ttm_bo_glob.lru_lock);
983         ttm_bo_del_from_lru(bo);
984         ttm_bo_add_mem_to_lru(bo, mem);
985         spin_unlock(&ttm_bo_glob.lru_lock);
986
987         return 0;
988 }
989
990 /**
991  * Creates space for memory region @mem according to its type.
992  *
993  * This function first searches for free space in compatible memory types in
994  * the priority order defined by the driver.  If free space isn't found, then
995  * ttm_bo_mem_force_space is attempted in priority order to evict and find
996  * space.
997  */
998 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
999                         struct ttm_placement *placement,
1000                         struct ttm_resource *mem,
1001                         struct ttm_operation_ctx *ctx)
1002 {
1003         struct ttm_bo_device *bdev = bo->bdev;
1004         bool type_found = false;
1005         int i, ret;
1006
1007         ret = dma_resv_reserve_shared(bo->base.resv, 1);
1008         if (unlikely(ret))
1009                 return ret;
1010
1011         for (i = 0; i < placement->num_placement; ++i) {
1012                 const struct ttm_place *place = &placement->placement[i];
1013                 struct ttm_resource_manager *man;
1014
1015                 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1016                 if (ret == -EBUSY)
1017                         continue;
1018                 if (ret)
1019                         goto error;
1020
1021                 type_found = true;
1022                 ret = ttm_resource_alloc(bo, place, mem);
1023                 if (ret == -ENOSPC)
1024                         continue;
1025                 if (unlikely(ret))
1026                         goto error;
1027
1028                 man = ttm_manager_type(bdev, mem->mem_type);
1029                 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
1030                 if (unlikely(ret)) {
1031                         ttm_resource_free(bo, mem);
1032                         if (ret == -EBUSY)
1033                                 continue;
1034
1035                         goto error;
1036                 }
1037                 return 0;
1038         }
1039
1040         for (i = 0; i < placement->num_busy_placement; ++i) {
1041                 const struct ttm_place *place = &placement->busy_placement[i];
1042
1043                 ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1044                 if (ret == -EBUSY)
1045                         continue;
1046                 if (ret)
1047                         goto error;
1048
1049                 type_found = true;
1050                 ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1051                 if (likely(!ret))
1052                         return 0;
1053
1054                 if (ret && ret != -EBUSY)
1055                         goto error;
1056         }
1057
1058         ret = -ENOMEM;
1059         if (!type_found) {
1060                 pr_err("No compatible memory type found\n");
1061                 ret = -EINVAL;
1062         }
1063
1064 error:
1065         if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1066                 ttm_bo_move_to_lru_tail_unlocked(bo);
1067         }
1068
1069         return ret;
1070 }
1071 EXPORT_SYMBOL(ttm_bo_mem_space);
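
/*
 * Illustrative sketch (not part of this file): a placement that exercises the
 * two phases above. VRAM is tried first without evicting anything; if that
 * fails, the GTT busy placement may evict through ttm_bo_mem_force_space().
 * The example_* names are hypothetical.
 *
 *      static const struct ttm_place example_vram_place = {
 *              .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *      };
 *      static const struct ttm_place example_gtt_place = {
 *              .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *      };
 *      static const struct ttm_placement example_placement = {
 *              .num_placement = 1,
 *              .placement = &example_vram_place,
 *              .num_busy_placement = 1,
 *              .busy_placement = &example_gtt_place,
 *      };
 */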
1072
1073 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1074                               struct ttm_placement *placement,
1075                               struct ttm_operation_ctx *ctx)
1076 {
1077         int ret = 0;
1078         struct ttm_resource mem;
1079
1080         dma_resv_assert_held(bo->base.resv);
1081
1082         mem.num_pages = bo->num_pages;
1083         mem.size = mem.num_pages << PAGE_SHIFT;
1084         mem.page_alignment = bo->mem.page_alignment;
1085         mem.bus.io_reserved_vm = false;
1086         mem.bus.io_reserved_count = 0;
1087         mem.mm_node = NULL;
1088
1089         /*
1090          * Determine where to move the buffer.
1091          */
1092         ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1093         if (ret)
1094                 goto out_unlock;
1095         ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1096 out_unlock:
1097         if (ret)
1098                 ttm_resource_free(bo, &mem);
1099         return ret;
1100 }
1101
1102 static bool ttm_bo_places_compat(const struct ttm_place *places,
1103                                  unsigned num_placement,
1104                                  struct ttm_resource *mem,
1105                                  uint32_t *new_flags)
1106 {
1107         unsigned i;
1108
1109         for (i = 0; i < num_placement; i++) {
1110                 const struct ttm_place *heap = &places[i];
1111
1112                 if ((mem->start < heap->fpfn ||
1113                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1114                         continue;
1115
1116                 *new_flags = heap->flags;
1117                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1118                     (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1119                     (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1120                      (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1121                         return true;
1122         }
1123         return false;
1124 }
1125
1126 bool ttm_bo_mem_compat(struct ttm_placement *placement,
1127                        struct ttm_resource *mem,
1128                        uint32_t *new_flags)
1129 {
1130         if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1131                                  mem, new_flags))
1132                 return true;
1133
1134         if ((placement->busy_placement != placement->placement ||
1135              placement->num_busy_placement > placement->num_placement) &&
1136             ttm_bo_places_compat(placement->busy_placement,
1137                                  placement->num_busy_placement,
1138                                  mem, new_flags))
1139                 return true;
1140
1141         return false;
1142 }
1143 EXPORT_SYMBOL(ttm_bo_mem_compat);
1144
1145 int ttm_bo_validate(struct ttm_buffer_object *bo,
1146                     struct ttm_placement *placement,
1147                     struct ttm_operation_ctx *ctx)
1148 {
1149         int ret;
1150         uint32_t new_flags;
1151
1152         dma_resv_assert_held(bo->base.resv);
1153
1154         /*
1155          * Remove the backing store if no placement is given.
1156          */
1157         if (!placement->num_placement && !placement->num_busy_placement) {
1158                 ret = ttm_bo_pipeline_gutting(bo);
1159                 if (ret)
1160                         return ret;
1161
1162                 return ttm_tt_create(bo, false);
1163         }
1164
1165         /*
1166          * Check whether we need to move buffer.
1167          */
1168         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1169                 ret = ttm_bo_move_buffer(bo, placement, ctx);
1170                 if (ret)
1171                         return ret;
1172         } else {
1173                 /*
1174                  * Use the access and other non-mapping-related flag bits from
1175                  * the compatible memory placement flags to the active flags
1176                  */
1177                 ttm_flag_masked(&bo->mem.placement, new_flags,
1178                                 ~TTM_PL_MASK_MEMTYPE);
1179         }
1180         /*
1181          * We might need to add a TTM.
1182          */
1183         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1184                 ret = ttm_tt_create(bo, true);
1185                 if (ret)
1186                         return ret;
1187         }
1188         return 0;
1189 }
1190 EXPORT_SYMBOL(ttm_bo_validate);
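
/*
 * Illustrative sketch (not part of this file): a driver moving an already
 * initialized BO into VRAM with ttm_bo_validate(). The example_* name is
 * hypothetical.
 *
 *      static int example_move_to_vram(struct ttm_buffer_object *bo)
 *      {
 *              struct ttm_operation_ctx ctx = { .interruptible = true,
 *                                               .no_wait_gpu = false };
 *              struct ttm_place place = {
 *                      .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *              };
 *              struct ttm_placement placement = {
 *                      .num_placement = 1,
 *                      .placement = &place,
 *                      .num_busy_placement = 1,
 *                      .busy_placement = &place,
 *              };
 *              int ret;
 *
 *              ret = ttm_bo_reserve(bo, true, false, NULL);  // lock the resv
 *              if (ret)
 *                      return ret;
 *              ret = ttm_bo_validate(bo, &placement, &ctx);
 *              ttm_bo_unreserve(bo);
 *              return ret;
 *      }
 */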
1191
1192 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1193                          struct ttm_buffer_object *bo,
1194                          unsigned long size,
1195                          enum ttm_bo_type type,
1196                          struct ttm_placement *placement,
1197                          uint32_t page_alignment,
1198                          struct ttm_operation_ctx *ctx,
1199                          size_t acc_size,
1200                          struct sg_table *sg,
1201                          struct dma_resv *resv,
1202                          void (*destroy) (struct ttm_buffer_object *))
1203 {
1204         struct ttm_mem_global *mem_glob = &ttm_mem_glob;
1205         int ret = 0;
1206         unsigned long num_pages;
1207         bool locked;
1208
1209         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1210         if (ret) {
1211                 pr_err("Out of kernel memory\n");
1212                 if (destroy)
1213                         (*destroy)(bo);
1214                 else
1215                         kfree(bo);
1216                 return -ENOMEM;
1217         }
1218
1219         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1220         if (num_pages == 0) {
1221                 pr_err("Illegal buffer object size\n");
1222                 if (destroy)
1223                         (*destroy)(bo);
1224                 else
1225                         kfree(bo);
1226                 ttm_mem_global_free(mem_glob, acc_size);
1227                 return -EINVAL;
1228         }
1229         bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1230
1231         kref_init(&bo->kref);
1232         INIT_LIST_HEAD(&bo->lru);
1233         INIT_LIST_HEAD(&bo->ddestroy);
1234         INIT_LIST_HEAD(&bo->swap);
1235         INIT_LIST_HEAD(&bo->io_reserve_lru);
1236         bo->bdev = bdev;
1237         bo->type = type;
1238         bo->num_pages = num_pages;
1239         bo->mem.size = num_pages << PAGE_SHIFT;
1240         bo->mem.mem_type = TTM_PL_SYSTEM;
1241         bo->mem.num_pages = bo->num_pages;
1242         bo->mem.mm_node = NULL;
1243         bo->mem.page_alignment = page_alignment;
1244         bo->mem.bus.io_reserved_vm = false;
1245         bo->mem.bus.io_reserved_count = 0;
1246         bo->moving = NULL;
1247         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1248         bo->acc_size = acc_size;
1249         bo->sg = sg;
1250         if (resv) {
1251                 bo->base.resv = resv;
1252                 dma_resv_assert_held(bo->base.resv);
1253         } else {
1254                 bo->base.resv = &bo->base._resv;
1255         }
1256         if (!ttm_bo_uses_embedded_gem_object(bo)) {
1257                 /*
1258                  * bo.gem is not initialized, so we have to set up the
1259                  * struct elements we want to use regardless.
1260                  */
1261                 dma_resv_init(&bo->base._resv);
1262                 drm_vma_node_reset(&bo->base.vma_node);
1263         }
1264         atomic_inc(&ttm_bo_glob.bo_count);
1265
1266         /*
1267          * For ttm_bo_type_device buffers, allocate
1268          * address space from the device.
1269          */
1270         if (bo->type == ttm_bo_type_device ||
1271             bo->type == ttm_bo_type_sg)
1272                 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
1273                                          bo->mem.num_pages);
1274
1275         /* passed reservation objects should already be locked,
1276          * since otherwise lockdep will be angered in radeon.
1277          */
1278         if (!resv) {
1279                 locked = dma_resv_trylock(bo->base.resv);
1280                 WARN_ON(!locked);
1281         }
1282
1283         if (likely(!ret))
1284                 ret = ttm_bo_validate(bo, placement, ctx);
1285
1286         if (unlikely(ret)) {
1287                 if (!resv)
1288                         ttm_bo_unreserve(bo);
1289
1290                 ttm_bo_put(bo);
1291                 return ret;
1292         }
1293
1294         ttm_bo_move_to_lru_tail_unlocked(bo);
1295
1296         return ret;
1297 }
1298 EXPORT_SYMBOL(ttm_bo_init_reserved);
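
/*
 * Illustrative sketch (not part of this file): the reserved variant lets a
 * driver finish setting up a BO before anyone else can see it. On failure
 * the BO has already been freed through its destroy callback. The example_*
 * name is hypothetical.
 *
 *      static int example_create_bo(struct ttm_bo_device *bdev,
 *                                   unsigned long size,
 *                                   struct ttm_placement *placement,
 *                                   struct ttm_buffer_object **p_bo)
 *      {
 *              struct ttm_operation_ctx ctx = { .interruptible = false,
 *                                               .no_wait_gpu = false };
 *              struct ttm_buffer_object *bo;
 *              size_t acc_size;
 *              int ret;
 *
 *              bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              if (!bo)
 *                      return -ENOMEM;
 *
 *              acc_size = ttm_bo_acc_size(bdev, size, sizeof(*bo));
 *              ret = ttm_bo_init_reserved(bdev, bo, size, ttm_bo_type_kernel,
 *                                         placement, 0, &ctx, acc_size,
 *                                         NULL, NULL, NULL);
 *              if (ret)
 *                      return ret;  // bo was already freed for us
 *
 *              // still reserved here: safe to map, fill tables, etc.
 *              ttm_bo_unreserve(bo);
 *              *p_bo = bo;
 *              return 0;
 *      }
 */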
1299
1300 int ttm_bo_init(struct ttm_bo_device *bdev,
1301                 struct ttm_buffer_object *bo,
1302                 unsigned long size,
1303                 enum ttm_bo_type type,
1304                 struct ttm_placement *placement,
1305                 uint32_t page_alignment,
1306                 bool interruptible,
1307                 size_t acc_size,
1308                 struct sg_table *sg,
1309                 struct dma_resv *resv,
1310                 void (*destroy) (struct ttm_buffer_object *))
1311 {
1312         struct ttm_operation_ctx ctx = { interruptible, false };
1313         int ret;
1314
1315         ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1316                                    page_alignment, &ctx, acc_size,
1317                                    sg, resv, destroy);
1318         if (ret)
1319                 return ret;
1320
1321         if (!resv)
1322                 ttm_bo_unreserve(bo);
1323
1324         return 0;
1325 }
1326 EXPORT_SYMBOL(ttm_bo_init);
1327
1328 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1329                        unsigned long bo_size,
1330                        unsigned struct_size)
1331 {
1332         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1333         size_t size = 0;
1334
1335         size += ttm_round_pot(struct_size);
1336         size += ttm_round_pot(npages * sizeof(void *));
1337         size += ttm_round_pot(sizeof(struct ttm_tt));
1338         return size;
1339 }
1340 EXPORT_SYMBOL(ttm_bo_acc_size);
1341
1342 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1343                            unsigned long bo_size,
1344                            unsigned struct_size)
1345 {
1346         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1347         size_t size = 0;
1348
1349         size += ttm_round_pot(struct_size);
1350         size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1351         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1352         return size;
1353 }
1354 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1355
1356 int ttm_bo_create(struct ttm_bo_device *bdev,
1357                         unsigned long size,
1358                         enum ttm_bo_type type,
1359                         struct ttm_placement *placement,
1360                         uint32_t page_alignment,
1361                         bool interruptible,
1362                         struct ttm_buffer_object **p_bo)
1363 {
1364         struct ttm_buffer_object *bo;
1365         size_t acc_size;
1366         int ret;
1367
1368         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1369         if (unlikely(bo == NULL))
1370                 return -ENOMEM;
1371
1372         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1373         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1374                           interruptible, acc_size,
1375                           NULL, NULL, NULL);
1376         if (likely(ret == 0))
1377                 *p_bo = bo;
1378
1379         return ret;
1380 }
1381 EXPORT_SYMBOL(ttm_bo_create);
1382
1383 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1384 {
1385         struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
1386
1387         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1388                 pr_err("Illegal memory manager memory type %u\n", mem_type);
1389                 return -EINVAL;
1390         }
1391
1392         if (!man) {
1393                 pr_err("Memory type %u has not been initialized\n", mem_type);
1394                 return 0;
1395         }
1396
1397         return ttm_resource_manager_force_list_clean(bdev, man);
1398 }
1399 EXPORT_SYMBOL(ttm_bo_evict_mm);
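
/*
 * Illustrative sketch (not part of this file): drivers commonly call this in
 * their suspend path to push everything out of device memory. edev here is a
 * hypothetical driver device embedding a ttm_bo_device:
 *
 *      ret = ttm_bo_evict_mm(&edev->bdev, TTM_PL_VRAM);
 *      if (ret)
 *              dev_warn(edev->dev, "failed to evict VRAM before suspend\n");
 */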
1400
1401 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1402 {
1403         struct ttm_bo_global *glob =
1404                 container_of(kobj, struct ttm_bo_global, kobj);
1405
1406         __free_page(glob->dummy_read_page);
1407 }
1408
1409 static void ttm_bo_global_release(void)
1410 {
1411         struct ttm_bo_global *glob = &ttm_bo_glob;
1412
1413         mutex_lock(&ttm_global_mutex);
1414         if (--ttm_bo_glob_use_count > 0)
1415                 goto out;
1416
1417         kobject_del(&glob->kobj);
1418         kobject_put(&glob->kobj);
1419         ttm_mem_global_release(&ttm_mem_glob);
1420         memset(glob, 0, sizeof(*glob));
1421 out:
1422         mutex_unlock(&ttm_global_mutex);
1423 }
1424
1425 static int ttm_bo_global_init(void)
1426 {
1427         struct ttm_bo_global *glob = &ttm_bo_glob;
1428         int ret = 0;
1429         unsigned i;
1430
1431         mutex_lock(&ttm_global_mutex);
1432         if (++ttm_bo_glob_use_count > 1)
1433                 goto out;
1434
1435         ret = ttm_mem_global_init(&ttm_mem_glob);
1436         if (ret)
1437                 goto out;
1438
1439         spin_lock_init(&glob->lru_lock);
1440         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1441
1442         if (unlikely(glob->dummy_read_page == NULL)) {
1443                 ret = -ENOMEM;
1444                 goto out;
1445         }
1446
1447         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1448                 INIT_LIST_HEAD(&glob->swap_lru[i]);
1449         INIT_LIST_HEAD(&glob->device_list);
1450         atomic_set(&glob->bo_count, 0);
1451
1452         ret = kobject_init_and_add(
1453                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1454         if (unlikely(ret != 0))
1455                 kobject_put(&glob->kobj);
1456 out:
1457         mutex_unlock(&ttm_global_mutex);
1458         return ret;
1459 }
1460
1461 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1462 {
1463         struct ttm_bo_global *glob = &ttm_bo_glob;
1464         int ret = 0;
1465         unsigned i;
1466         struct ttm_resource_manager *man;
1467
1468         man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
1469         ttm_resource_manager_set_used(man, false);
1470         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
1471
1472         mutex_lock(&ttm_global_mutex);
1473         list_del(&bdev->device_list);
1474         mutex_unlock(&ttm_global_mutex);
1475
1476         cancel_delayed_work_sync(&bdev->wq);
1477
1478         if (ttm_bo_delayed_delete(bdev, true))
1479                 pr_debug("Delayed destroy list was clean\n");
1480
1481         spin_lock(&glob->lru_lock);
1482         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1483                 if (list_empty(&man->lru[i]))
1484                         pr_debug("Swap list %d was clean\n", i);
1485         spin_unlock(&glob->lru_lock);
1486
1487         if (!ret)
1488                 ttm_bo_global_release();
1489
1490         return ret;
1491 }
1492 EXPORT_SYMBOL(ttm_bo_device_release);
1493
1494 static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
1495 {
1496         struct ttm_resource_manager *man = &bdev->sysman;
1497
1498         /*
1499          * Initialize the system memory buffer type.
1500          * Other types need to be driver / IOCTL initialized.
1501          */
1502         man->use_tt = true;
1503         man->available_caching = TTM_PL_MASK_CACHING;
1504         man->default_caching = TTM_PL_FLAG_CACHED;
1505
1506         ttm_resource_manager_init(man, 0);
1507         ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
1508         ttm_resource_manager_set_used(man, true);
1509 }
1510
1511 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1512                        struct ttm_bo_driver *driver,
1513                        struct address_space *mapping,
1514                        struct drm_vma_offset_manager *vma_manager,
1515                        bool need_dma32)
1516 {
1517         struct ttm_bo_global *glob = &ttm_bo_glob;
1518         int ret;
1519
1520         if (WARN_ON(vma_manager == NULL))
1521                 return -EINVAL;
1522
1523         ret = ttm_bo_global_init();
1524         if (ret)
1525                 return ret;
1526
1527         bdev->driver = driver;
1528
1529         ttm_bo_init_sysman(bdev);
1530
1531         bdev->vma_manager = vma_manager;
1532         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1533         INIT_LIST_HEAD(&bdev->ddestroy);
1534         bdev->dev_mapping = mapping;
1535         bdev->need_dma32 = need_dma32;
1536         mutex_lock(&ttm_global_mutex);
1537         list_add_tail(&bdev->device_list, &glob->device_list);
1538         mutex_unlock(&ttm_global_mutex);
1539
1540         return 0;
1541 }
1542 EXPORT_SYMBOL(ttm_bo_device_init);
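
/*
 * Illustrative sketch (not part of this file): typical driver bring-up of a
 * ttm_bo_device. example_device and example_bo_driver are hypothetical; the
 * mapping and VMA offset manager usually come from the DRM device.
 *
 *      static int example_ttm_init(struct example_device *edev)
 *      {
 *              int ret;
 *
 *              ret = ttm_bo_device_init(&edev->bdev, &example_bo_driver,
 *                                       edev->drm.anon_inode->i_mapping,
 *                                       edev->drm.vma_offset_manager,
 *                                       false);  // need_dma32
 *              if (ret)
 *                      return ret;
 *
 *              // TTM_PL_SYSTEM is already set up by ttm_bo_init_sysman();
 *              // register VRAM/GTT managers with ttm_resource_manager_init()
 *              // and ttm_set_driver_manager() here.
 *              return 0;
 *      }
 */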
1543
1544 /*
1545  * buffer object vm functions.
1546  */
1547
1548 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1549 {
1550         struct ttm_bo_device *bdev = bo->bdev;
1551
1552         drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1553         ttm_mem_io_free_vm(bo);
1554 }
1555
1556 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1557 {
1558         struct ttm_bo_device *bdev = bo->bdev;
1559         struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
1560
1561         ttm_mem_io_lock(man, false);
1562         ttm_bo_unmap_virtual_locked(bo);
1563         ttm_mem_io_unlock(man);
1564 }
1567 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1568
1569 int ttm_bo_wait(struct ttm_buffer_object *bo,
1570                 bool interruptible, bool no_wait)
1571 {
1572         long timeout = 15 * HZ;
1573
1574         if (no_wait) {
1575                 if (dma_resv_test_signaled_rcu(bo->base.resv, true))
1576                         return 0;
1577                 else
1578                         return -EBUSY;
1579         }
1580
1581         timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
1582                                                       interruptible, timeout);
1583         if (timeout < 0)
1584                 return timeout;
1585
1586         if (timeout == 0)
1587                 return -EBUSY;
1588
1589         dma_resv_add_excl_fence(bo->base.resv, NULL);
1590         return 0;
1591 }
1592 EXPORT_SYMBOL(ttm_bo_wait);
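
/*
 * Illustrative sketch (not part of this file): waiting for a BO to become
 * idle before CPU access. The reservation must be held across the wait; the
 * example_* name is hypothetical.
 *
 *      static int example_wait_idle(struct ttm_buffer_object *bo)
 *      {
 *              int ret;
 *
 *              ret = ttm_bo_reserve(bo, true, false, NULL);
 *              if (ret)
 *                      return ret;
 *              ret = ttm_bo_wait(bo, true, false);  // interruptible wait
 *              ttm_bo_unreserve(bo);
 *              return ret;
 *      }
 */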
1593
1594 /**
1595  * A buffer object shrink method that tries to swap out the first
1596  * buffer object on the bo_global::swap_lru list.
1597  */
1598 int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1599 {
1600         struct ttm_buffer_object *bo;
1601         int ret = -EBUSY;
1602         bool locked;
1603         unsigned i;
1604
1605         spin_lock(&glob->lru_lock);
1606         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1607                 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1608                         if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1609                                                             NULL))
1610                                 continue;
1611
1612                         if (!ttm_bo_get_unless_zero(bo)) {
1613                                 if (locked)
1614                                         dma_resv_unlock(bo->base.resv);
1615                                 continue;
1616                         }
1617
1618                         ret = 0;
1619                         break;
1620                 }
1621                 if (!ret)
1622                         break;
1623         }
1624
1625         if (ret) {
1626                 spin_unlock(&glob->lru_lock);
1627                 return ret;
1628         }
1629
1630         if (bo->deleted) {
1631                 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1632                 ttm_bo_put(bo);
1633                 return ret;
1634         }
1635
1636         ttm_bo_del_from_lru(bo);
1637         spin_unlock(&glob->lru_lock);
1638
1639         /**
1640          * Move to system cached
1641          */
1642
1643         if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1644             bo->ttm->caching_state != tt_cached) {
1645                 struct ttm_operation_ctx ctx = { false, false };
1646                 struct ttm_resource evict_mem;
1647
1648                 evict_mem = bo->mem;
1649                 evict_mem.mm_node = NULL;
1650                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1651                 evict_mem.mem_type = TTM_PL_SYSTEM;
1652
1653                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1654                 if (unlikely(ret != 0))
1655                         goto out;
1656         }
1657
1658         /**
1659          * Make sure BO is idle.
1660          */
1661
1662         ret = ttm_bo_wait(bo, false, false);
1663         if (unlikely(ret != 0))
1664                 goto out;
1665
1666         ttm_bo_unmap_virtual(bo);
1667
1668         /**
1669          * Swap out. Buffer will be swapped in again as soon as
1670          * anyone tries to access a ttm page.
1671          */
1672
1673         if (bo->bdev->driver->swap_notify)
1674                 bo->bdev->driver->swap_notify(bo);
1675
1676         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1677 out:
1678
1679         /**
1680          *
1681          * Unreserve without putting on LRU to avoid swapping out an
1682          * already swapped buffer.
1683          */
1684         if (locked)
1685                 dma_resv_unlock(bo->base.resv);
1686         ttm_bo_put(bo);
1687         return ret;
1688 }
1689 EXPORT_SYMBOL(ttm_bo_swapout);
1690
1691 void ttm_bo_swapout_all(void)
1692 {
1693         struct ttm_operation_ctx ctx = {
1694                 .interruptible = false,
1695                 .no_wait_gpu = false
1696         };
1697
1698         while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
1699 }
1700 EXPORT_SYMBOL(ttm_bo_swapout_all);