[linux-2.6-microblaze.git] drivers/gpu/drm/ttm/ttm_bo_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_resource_free(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(bo->bdev, ttm);
                ttm_bo_free_old_node(bo);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(bo->bdev, ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
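
/*
 * Example (illustrative sketch only, not taken from an in-tree driver):
 * a driver's move callback typically delegates to ttm_bo_move_ttm() when
 * the transfer only shuffles pages between system memory and a TTM-backed
 * domain such as TTM_PL_TT.  The callback signature and the mydrv_ names
 * below are assumptions made for the sake of the example.
 *
 *      static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                               struct ttm_operation_ctx *ctx,
 *                               struct ttm_resource *new_mem)
 *      {
 *              struct ttm_resource *old_mem = &bo->mem;
 *
 *              if ((old_mem->mem_type == TTM_PL_SYSTEM &&
 *                   new_mem->mem_type == TTM_PL_TT) ||
 *                  (old_mem->mem_type == TTM_PL_TT &&
 *                   new_mem->mem_type == TTM_PL_SYSTEM))
 *                      return ttm_bo_move_ttm(bo, ctx, new_mem);
 *
 *              return mydrv_move_vram(bo, evict, ctx, new_mem);
 *      }
 */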

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->driver->io_mem_reserve)
                return 0;

        return bdev->driver->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}
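
/*
 * Example (illustrative sketch): a driver's io_mem_reserve() callback is
 * expected to fill in the bus placement so that bus.offset is the physical
 * address handed to ioremap() in ttm_resource_ioremap() and
 * ttm_bo_ioremap() below.  The mydrv_ names and the vram_base field are
 * assumptions made for the sake of the example.
 *
 *      static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
 *                                      struct ttm_resource *mem)
 *      {
 *              struct mydrv_device *mdev = mydrv_device(bdev);
 *
 *              switch (mem->mem_type) {
 *              case TTM_PL_SYSTEM:
 *              case TTM_PL_TT:
 *                      return 0;
 *              case TTM_PL_VRAM:
 *                      mem->bus.offset = (mem->start << PAGE_SHIFT) +
 *                                        mdev->vram_base;
 *                      mem->bus.is_iomem = true;
 *                      return 0;
 *              default:
 *                      return -EINVAL;
 *              }
 *      }
 */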

static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                                struct ttm_resource *mem,
                                void **virtual)
{
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.offset, bus_size);
                else
                        addr = ioremap(mem->bus.offset, bus_size);
                if (!addr) {
                        ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem,
                                 void *virtual)
{
        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        ttm_mem_io_free(bdev, mem);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        kunmap_atomic(dst);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        kunmap_atomic(src);

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

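        /*
         * If both resources are in the same memory type the ranges may
         * overlap.  When the destination starts before the end of the
         * source, copy the pages back to front so data is read before it
         * is overwritten.
         */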
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (!man->use_tt) {
                ttm_tt_destroy(bdev, ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_resource_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_resource_free(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
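
/*
 * Example (illustrative sketch): drivers usually try an accelerated copy
 * first and keep ttm_bo_move_memcpy() as the last-resort fallback in
 * their move callback, e.g. when no copy engine is available yet.  The
 * mydrv_move_blit() helper is an assumption made for the sake of the
 * example.
 *
 *      r = mydrv_move_blit(bo, evict, ctx, new_mem);
 *      if (r)
 *              r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 *      return r;
 */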

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
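
/*
 * Usage note: callers feed the caching flags of a placement into
 * ttm_io_prot() and use the resulting protection when building a CPU
 * mapping, for example:
 *
 *      pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *      void *vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
 *
 * which mirrors what ttm_bo_kmap_ttm() below does for the multi-page
 * case.
 */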

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap(bo->mem.bus.offset + offset,
                                               size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
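
/*
 * Example (illustrative sketch): mapping a reserved buffer object for a
 * CPU write.  ttm_kmap_obj_virtual() is the accessor from ttm_bo_api.h
 * that also reports whether the mapping is I/O memory; data and size are
 * the caller's source buffer.
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      void *virtual;
 *      int r;
 *
 *      r = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *      if (r)
 *              return r;
 *      virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *      if (is_iomem)
 *              memcpy_toio((void __iomem *)virtual, data, size);
 *      else
 *              memcpy(virtual, data, size);
 *      ttm_bo_kunmap(&map);
 */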

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_resource *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!man->use_tt) {
                        ttm_tt_destroy(bdev, bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (man->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
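
/*
 * Example (illustrative sketch): a driver that schedules the copy on a
 * DMA engine hands the resulting fence to ttm_bo_move_accel_cleanup() so
 * TTM can pipeline the move.  mydrv_copy_buffer() is an assumption made
 * for the sake of the example.
 *
 *      fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
 *      if (IS_ERR(fence))
 *              return PTR_ERR(fence);
 *      r = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *      dma_fence_put(fence);
 *      return r;
 */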

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource *old_mem = &bo->mem;

        struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);

        int ret;

        dma_resv_add_excl_fence(bo->base.resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (to->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);

        } else if (!from->use_tt) {

                /**
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation.
                 */

                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /**
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!to->use_tt) {
                        ttm_tt_destroy(bdev, bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
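
/*
 * Note: unlike ttm_bo_move_accel_cleanup(), which waits for the fence
 * when evicting, ttm_bo_pipeline_move() keeps evictions from fixed
 * (non-TT) memory asynchronous as well by attaching the fence to the
 * source manager's move fence, and only waits as a last resort.
 */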

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);

        return 0;
}