/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

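/*
 * A "ghost" buffer object: wraps a private ttm_buffer_object around a
 * reference to the BO it was cloned from, so the original stays alive
 * until the ghost is destroyed (see ttm_transfered_destroy()).
 */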
struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_resource_free(bo, &bo->mem);
}

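/*
 * Move a buffer between the system domain and a TTM-backed domain by
 * rebinding its TTM: wait for idle, unbind from the old (non-system)
 * placement, switch caching to match the new placement and, if the new
 * placement is not system memory, bind the TTM there.
 */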
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(bo->bdev, ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(bo->bdev, ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

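/*
 * Ask the driver to fill in the bus/I/O address information for @mem
 * (via the io_mem_reserve callback) unless it has already been reserved.
 */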
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->driver->io_mem_reserve)
                return 0;

        return bdev->driver->io_mem_reserve(bdev, mem);
}

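/*
 * Undo ttm_mem_io_reserve(): let the driver tear down its I/O mapping
 * state and clear the cached bus address/offset.
 */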
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}

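/*
 * Map a whole I/O resource into kernel address space for the CPU copy
 * path below: reuse a pre-mapped bus address if the driver provides one,
 * otherwise ioremap()/ioremap_wc() the bus window. System (non-iomem)
 * resources are left unmapped (*virtual stays NULL).
 */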
static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                                struct ttm_resource *mem,
                                void **virtual)
{
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.offset, bus_size);
                else
                        addr = ioremap(mem->bus.offset, bus_size);
                if (!addr) {
                        ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem,
                                 void *virtual)
{
        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        ttm_mem_io_free(bdev, mem);
}

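/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): copy one page between
 * two I/O mappings (32-bit word accesses), from an I/O mapping into a TTM
 * page, or from a TTM page into an I/O mapping. The TTM page is mapped
 * with kmap_atomic_prot() so the requested caching attributes are honoured.
 */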
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        kunmap_atomic(dst);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        kunmap_atomic(src);

        return 0;
}

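/*
 * CPU fallback move: map the old and new resources, copy the contents
 * page by page (choosing the copy direction so that overlapping moves
 * within the same memory type are handled correctly), then install the
 * new resource in the BO. On error the old mm node is kept so the caller
 * can clean up.
 */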
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (!man->use_tt) {
                ttm_tt_destroy(bdev, ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_resource_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_resource_free(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

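/*
 * Derive the page protection to use for a CPU mapping of a placement with
 * the given caching flags: cached mappings are left alone, write-combined
 * and uncached mappings are adjusted on the architectures that need it.
 */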
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

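/*
 * Kernel-map a sub-range of an iomem placement for ttm_bo_kmap(): use the
 * driver's pre-mapped bus address when available, otherwise ioremap() or
 * ioremap_wc() the range depending on the placement's caching flags.
 */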
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap(bo->mem.bus.offset + offset,
                                               size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

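/*
 * Kernel-map a range of TTM pages for ttm_bo_kmap(): a single cached page
 * can simply be kmap()'d, anything else goes through vmap() so the mapping
 * gets the right page protection and looks contiguous to the caller.
 */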
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

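/*
 * Map part of a buffer object into kernel address space, dispatching to
 * ttm_bo_ioremap() for iomem placements and ttm_bo_kmap_ttm() otherwise.
 *
 * A minimal, hypothetical driver-side sketch (assuming the caller already
 * holds the BO reservation and uses ttm_kmap_obj_virtual() from
 * ttm_bo_api.h to retrieve the pointer):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *
 *	if (!ret) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		... CPU access through ptr ...
 *
 *		ttm_bo_kunmap(&map);
 *	}
 */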
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

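/*
 * Tear down a mapping created by ttm_bo_kmap(), using the unmap primitive
 * that matches how the range was mapped, and drop the I/O reservation.
 */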
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

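/*
 * Finish an accelerated (GPU) move: attach @fence to the BO and either
 * wait and free the old node (eviction), or hang the old placement on a
 * ghost object so it is released only once the copy has completed.
 */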
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_resource *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!man->use_tt) {
                        ttm_tt_destroy(bdev, bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (man->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

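/*
 * Pipelined version of the accelerated-move cleanup: for ordinary moves the
 * old placement is handed to a ghost object; for evictions out of a domain
 * without a TTM only the fence is remembered on the source manager and the
 * old node is freed immediately; otherwise fall back to waiting for idle.
 */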
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource *old_mem = &bo->mem;

        struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);

        int ret;

        dma_resv_add_excl_fence(bo->base.resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (to->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);

        } else if (!from->use_tt) {

                /**
                 * The BO doesn't have a TTM that we need to bind/unbind.
                 * Just remember this eviction and free up the allocation.
                 */

                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /**
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!to->use_tt) {
                        ttm_tt_destroy(bdev, bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

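/*
 * Drop a buffer object's backing store: hand its current placement and
 * fences to a ghost object and leave the BO itself empty in the system
 * domain. If copying the fences fails (OOM), wait for the BO to be idle
 * instead.
 */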
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);

        return 0;
}