[linux-2.6-microblaze.git] drivers/gpu/drm/ttm/ttm_bo_util.c
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31
32 #include <drm/ttm/ttm_bo_driver.h>
33 #include <drm/ttm/ttm_placement.h>
34 #include <drm/drm_vma_manager.h>
35 #include <linux/io.h>
36 #include <linux/highmem.h>
37 #include <linux/wait.h>
38 #include <linux/slab.h>
39 #include <linux/vmalloc.h>
40 #include <linux/module.h>
41 #include <linux/dma-resv.h>
42
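/*
 * Ghost object: a temporary buffer object that takes over the old backing
 * store of a BO (see ttm_buffer_object_transfer() below) and holds a
 * reference on the original BO until outstanding GPU work has completed.
 */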
43 struct ttm_transfer_obj {
44         struct ttm_buffer_object base;
45         struct ttm_buffer_object *bo;
46 };
47
48 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
49 {
50         ttm_resource_free(bo, &bo->mem);
51 }
52
53 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
54                     struct ttm_operation_ctx *ctx,
55                     struct ttm_resource *new_mem)
56 {
57         struct ttm_tt *ttm = bo->ttm;
58         struct ttm_resource *old_mem = &bo->mem;
59         int ret;
60
61         if (old_mem->mem_type != TTM_PL_SYSTEM) {
62                 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
63
64                 if (unlikely(ret != 0)) {
65                         if (ret != -ERESTARTSYS)
66                                 pr_err("Failed to expire sync object before unbinding TTM\n");
67                         return ret;
68                 }
69
70                 ttm_bo_tt_unbind(bo);
71                 ttm_bo_free_old_node(bo);
72                 old_mem->mem_type = TTM_PL_SYSTEM;
73         }
74
75         ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
76         if (unlikely(ret != 0))
77                 return ret;
78
79         if (new_mem->mem_type != TTM_PL_SYSTEM) {
80
81                 ret = ttm_tt_populate(bo->bdev, ttm, ctx);
82                 if (unlikely(ret != 0))
83                         return ret;
84
85                 ret = ttm_bo_tt_bind(bo, new_mem);
86                 if (unlikely(ret != 0))
87                         return ret;
88         }
89
90         *old_mem = *new_mem;
91         new_mem->mm_node = NULL;
92
93         return 0;
94 }
95 EXPORT_SYMBOL(ttm_bo_move_ttm);
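/*
 * Usage sketch (hypothetical, not part of this file): a driver would
 * typically call ttm_bo_move_ttm() from its move callback when the new
 * placement is backed by a struct ttm_tt, e.g.
 *
 *         static int my_move_to_gtt(struct ttm_buffer_object *bo,
 *                                   struct ttm_operation_ctx *ctx,
 *                                   struct ttm_resource *new_mem)
 *         {
 *                 return ttm_bo_move_ttm(bo, ctx, new_mem);
 *         }
 *
 * "my_move_to_gtt" is a placeholder name, not a TTM API.
 */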
96
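/*
 * Ask the driver to fill in @mem->bus (offset/addr/is_iomem) so the resource
 * can be mapped for CPU access.  A no-op if the resource is already reserved
 * or the driver has no io_mem_reserve callback.
 */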
97 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
98                        struct ttm_resource *mem)
99 {
100         if (mem->bus.offset || mem->bus.addr)
101                 return 0;
102
103         mem->bus.is_iomem = false;
104         if (!bdev->driver->io_mem_reserve)
105                 return 0;
106
107         return bdev->driver->io_mem_reserve(bdev, mem);
108 }
109
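/*
 * Undo ttm_mem_io_reserve(): let the driver release its mapping and clear
 * @mem->bus.
 */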
110 void ttm_mem_io_free(struct ttm_bo_device *bdev,
111                      struct ttm_resource *mem)
112 {
113         if (!mem->bus.offset && !mem->bus.addr)
114                 return;
115
116         if (bdev->driver->io_mem_free)
117                 bdev->driver->io_mem_free(bdev, mem);
118
119         mem->bus.offset = 0;
120         mem->bus.addr = NULL;
121 }
122
123 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
124                                struct ttm_resource *mem,
125                                void **virtual)
126 {
127         int ret;
128         void *addr;
129
130         *virtual = NULL;
131         ret = ttm_mem_io_reserve(bdev, mem);
132         if (ret || !mem->bus.is_iomem)
133                 return ret;
134
135         if (mem->bus.addr) {
136                 addr = mem->bus.addr;
137         } else {
138                 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
139
140                 if (mem->placement & TTM_PL_FLAG_WC)
141                         addr = ioremap_wc(mem->bus.offset, bus_size);
142                 else
143                         addr = ioremap(mem->bus.offset, bus_size);
144                 if (!addr) {
145                         ttm_mem_io_free(bdev, mem);
146                         return -ENOMEM;
147                 }
148         }
149         *virtual = addr;
150         return 0;
151 }
152
153 static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
154                                 struct ttm_resource *mem,
155                                 void *virtual)
156 {
157         if (virtual && mem->bus.addr == NULL)
158                 iounmap(virtual);
159         ttm_mem_io_free(bdev, mem);
160 }
161
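/* Copy a single page between two iomapped regions, 32 bits at a time. */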
162 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
163 {
164         uint32_t *dstP =
165             (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
166         uint32_t *srcP =
167             (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
168
169         int i;
170         for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
171                 iowrite32(ioread32(srcP++), dstP++);
172         return 0;
173 }
174
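/* Copy one page from an iomapped source into a TTM page mapped with @prot. */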
175 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
176                                 unsigned long page,
177                                 pgprot_t prot)
178 {
179         struct page *d = ttm->pages[page];
180         void *dst;
181
182         if (!d)
183                 return -ENOMEM;
184
185         src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
186         dst = kmap_atomic_prot(d, prot);
187         if (!dst)
188                 return -ENOMEM;
189
190         memcpy_fromio(dst, src, PAGE_SIZE);
191
192         kunmap_atomic(dst);
193
194         return 0;
195 }
196
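/* Copy one TTM page, mapped with @prot, out to an iomapped destination. */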
197 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
198                                 unsigned long page,
199                                 pgprot_t prot)
200 {
201         struct page *s = ttm->pages[page];
202         void *src;
203
204         if (!s)
205                 return -ENOMEM;
206
207         dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
208         src = kmap_atomic_prot(s, prot);
209         if (!src)
210                 return -ENOMEM;
211
212         memcpy_toio(dst, src, PAGE_SIZE);
213
214         kunmap_atomic(src);
215
216         return 0;
217 }
218
219 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
220                        struct ttm_operation_ctx *ctx,
221                        struct ttm_resource *new_mem)
222 {
223         struct ttm_bo_device *bdev = bo->bdev;
224         struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
225         struct ttm_tt *ttm = bo->ttm;
226         struct ttm_resource *old_mem = &bo->mem;
227         struct ttm_resource old_copy = *old_mem;
228         void *old_iomap;
229         void *new_iomap;
230         int ret;
231         unsigned long i;
232         unsigned long page;
233         unsigned long add = 0;
234         int dir;
235
236         ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
237         if (ret)
238                 return ret;
239
240         ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
241         if (ret)
242                 return ret;
243         ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
244         if (ret)
245                 goto out;
246
247         /*
248          * Single TTM move. NOP.
249          */
250         if (old_iomap == NULL && new_iomap == NULL)
251                 goto out2;
252
253         /*
254          * Don't move nonexistent data. Clear destination instead.
255          */
256         if (old_iomap == NULL &&
257             (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
258                              !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
259                 memset_io(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
260                 goto out2;
261         }
262
263         /*
264          * TTM might be null for moves within the same region.
265          */
266         if (ttm) {
267                 ret = ttm_tt_populate(bdev, ttm, ctx);
268                 if (ret)
269                         goto out1;
270         }
271
272         add = 0;
273         dir = 1;
274
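        /*
         * When source and destination live in the same memory type, their
         * page ranges may overlap; copy the pages in reverse order
         * (memmove-style) when the destination starts below the end of the
         * source, so pages are not overwritten before they have been read.
         */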
275         if ((old_mem->mem_type == new_mem->mem_type) &&
276             (new_mem->start < old_mem->start + old_mem->size)) {
277                 dir = -1;
278                 add = new_mem->num_pages - 1;
279         }
280
281         for (i = 0; i < new_mem->num_pages; ++i) {
282                 page = i * dir + add;
283                 if (old_iomap == NULL) {
284                         pgprot_t prot = ttm_io_prot(old_mem->placement,
285                                                     PAGE_KERNEL);
286                         ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
287                                                    prot);
288                 } else if (new_iomap == NULL) {
289                         pgprot_t prot = ttm_io_prot(new_mem->placement,
290                                                     PAGE_KERNEL);
291                         ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
292                                                    prot);
293                 } else {
294                         ret = ttm_copy_io_page(new_iomap, old_iomap, page);
295                 }
296                 if (ret)
297                         goto out1;
298         }
299         mb();
300 out2:
301         old_copy = *old_mem;
302         *old_mem = *new_mem;
303         new_mem->mm_node = NULL;
304
305         if (!man->use_tt)
306                 ttm_bo_tt_destroy(bo);
307
308 out1:
309         ttm_resource_iounmap(bdev, old_mem, new_iomap);
310 out:
311         ttm_resource_iounmap(bdev, &old_copy, old_iomap);
312
313         /*
314          * On error, keep the mm node!
315          */
316         if (!ret)
317                 ttm_resource_free(bo, &old_copy);
318         return ret;
319 }
320 EXPORT_SYMBOL(ttm_bo_move_memcpy);
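/*
 * Usage sketch (hypothetical): drivers commonly fall back to this CPU copy
 * from their move callback when no hardware copy engine is usable, e.g.
 *
 *         if (!my_copy_engine_usable(bo->bdev))
 *                 return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *
 * "my_copy_engine_usable" is a placeholder for driver-specific code.
 */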
321
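/*
 * Destroy callback for the ghost objects created by
 * ttm_buffer_object_transfer(): drop the reference on the original BO and
 * free the ghost.
 */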
322 static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
323 {
324         struct ttm_transfer_obj *fbo;
325
326         fbo = container_of(bo, struct ttm_transfer_obj, base);
327         ttm_bo_put(fbo->bo);
328         kfree(fbo);
329 }
330
331 /**
332  * ttm_buffer_object_transfer
333  *
334  * @bo: A pointer to a struct ttm_buffer_object.
335  * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
336  * holding the data of @bo with the old placement.
337  *
338  * This is a utility function that may be called after an accelerated move
339  * has been scheduled. A new buffer object is created as a placeholder for
340  * the old data while it's being copied. When that buffer object is idle,
341  * it can be destroyed, releasing the space of the old placement.
342  * Return:
343  * 0 on success, negative error code on failure.
344  */
345
346 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
347                                       struct ttm_buffer_object **new_obj)
348 {
349         struct ttm_transfer_obj *fbo;
350         int ret;
351
352         fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
353         if (!fbo)
354                 return -ENOMEM;
355
356         fbo->base = *bo;
357         fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
358
359         ttm_bo_get(bo);
360         fbo->bo = bo;
361
362         /**
363          * Fix up members that we shouldn't copy directly:
364          * TODO: Explicit member copy would probably be better here.
365          */
366
367         atomic_inc(&ttm_bo_glob.bo_count);
368         INIT_LIST_HEAD(&fbo->base.ddestroy);
369         INIT_LIST_HEAD(&fbo->base.lru);
370         INIT_LIST_HEAD(&fbo->base.swap);
371         fbo->base.moving = NULL;
372         drm_vma_node_reset(&fbo->base.base.vma_node);
373
374         kref_init(&fbo->base.kref);
375         fbo->base.destroy = &ttm_transfered_destroy;
376         fbo->base.acc_size = 0;
377         if (bo->type != ttm_bo_type_sg)
378                 fbo->base.base.resv = &fbo->base.base._resv;
379
380         dma_resv_init(&fbo->base.base._resv);
381         fbo->base.base.dev = NULL;
382         ret = dma_resv_trylock(&fbo->base.base._resv);
383         WARN_ON(!ret);
384
385         *new_obj = &fbo->base;
386         return 0;
387 }
388
389 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
390 {
391         /* Cached mappings need no adjustment */
392         if (caching_flags & TTM_PL_FLAG_CACHED)
393                 return tmp;
394
395 #if defined(__i386__) || defined(__x86_64__)
396         if (caching_flags & TTM_PL_FLAG_WC)
397                 tmp = pgprot_writecombine(tmp);
398         else if (boot_cpu_data.x86 > 3)
399                 tmp = pgprot_noncached(tmp);
400 #endif
401 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
402     defined(__powerpc__) || defined(__mips__)
403         if (caching_flags & TTM_PL_FLAG_WC)
404                 tmp = pgprot_writecombine(tmp);
405         else
406                 tmp = pgprot_noncached(tmp);
407 #endif
408 #if defined(__sparc__)
409         tmp = pgprot_noncached(tmp);
410 #endif
411         return tmp;
412 }
413 EXPORT_SYMBOL(ttm_io_prot);
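/*
 * Example (this is the call made in ttm_bo_kmap_ttm() below): derive the
 * kernel page protection that matches a resource's caching flags:
 *
 *         pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 */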
414
415 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
416                           unsigned long offset,
417                           unsigned long size,
418                           struct ttm_bo_kmap_obj *map)
419 {
420         struct ttm_resource *mem = &bo->mem;
421
422         if (bo->mem.bus.addr) {
423                 map->bo_kmap_type = ttm_bo_map_premapped;
424                 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
425         } else {
426                 map->bo_kmap_type = ttm_bo_map_iomap;
427                 if (mem->placement & TTM_PL_FLAG_WC)
428                         map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
429                                                   size);
430                 else
431                         map->virtual = ioremap(bo->mem.bus.offset + offset,
432                                                size);
433         }
434         return (!map->virtual) ? -ENOMEM : 0;
435 }
436
437 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
438                            unsigned long start_page,
439                            unsigned long num_pages,
440                            struct ttm_bo_kmap_obj *map)
441 {
442         struct ttm_resource *mem = &bo->mem;
443         struct ttm_operation_ctx ctx = {
444                 .interruptible = false,
445                 .no_wait_gpu = false
446         };
447         struct ttm_tt *ttm = bo->ttm;
448         pgprot_t prot;
449         int ret;
450
451         BUG_ON(!ttm);
452
453         ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
454         if (ret)
455                 return ret;
456
457         if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
458                 /*
459                  * We're mapping a single page, and the desired
460                  * page protection is consistent with the bo.
461                  */
462
463                 map->bo_kmap_type = ttm_bo_map_kmap;
464                 map->page = ttm->pages[start_page];
465                 map->virtual = kmap(map->page);
466         } else {
467                 /*
468                  * We need to use vmap to get the desired page protection
469                  * or to make the buffer object look contiguous.
470                  */
471                 prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
472                 map->bo_kmap_type = ttm_bo_map_vmap;
473                 map->virtual = vmap(ttm->pages + start_page, num_pages,
474                                     0, prot);
475         }
476         return (!map->virtual) ? -ENOMEM : 0;
477 }
478
479 int ttm_bo_kmap(struct ttm_buffer_object *bo,
480                 unsigned long start_page, unsigned long num_pages,
481                 struct ttm_bo_kmap_obj *map)
482 {
483         unsigned long offset, size;
484         int ret;
485
486         map->virtual = NULL;
487         map->bo = bo;
488         if (num_pages > bo->num_pages)
489                 return -EINVAL;
490         if (start_page > bo->num_pages)
491                 return -EINVAL;
492
493         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
494         if (ret)
495                 return ret;
496         if (!bo->mem.bus.is_iomem) {
497                 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
498         } else {
499                 offset = start_page << PAGE_SHIFT;
500                 size = num_pages << PAGE_SHIFT;
501                 return ttm_bo_ioremap(bo, offset, size, map);
502         }
503 }
504 EXPORT_SYMBOL(ttm_bo_kmap);
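/*
 * Usage sketch (hypothetical, error handling trimmed): map a whole BO for
 * CPU access and drop the mapping again:
 *
 *         struct ttm_bo_kmap_obj map;
 *         bool is_iomem;
 *         void *virt;
 *         int ret;
 *
 *         ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *         if (ret)
 *                 return ret;
 *         virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *         ... access virt, honouring is_iomem ...
 *         ttm_bo_kunmap(&map);
 */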
505
506 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
507 {
508         if (!map->virtual)
509                 return;
510         switch (map->bo_kmap_type) {
511         case ttm_bo_map_iomap:
512                 iounmap(map->virtual);
513                 break;
514         case ttm_bo_map_vmap:
515                 vunmap(map->virtual);
516                 break;
517         case ttm_bo_map_kmap:
518                 kunmap(map->page);
519                 break;
520         case ttm_bo_map_premapped:
521                 break;
522         default:
523                 BUG();
524         }
525         ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
526         map->virtual = NULL;
527         map->page = NULL;
528 }
529 EXPORT_SYMBOL(ttm_bo_kunmap);
530
531 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
532                               struct dma_fence *fence,
533                               bool evict,
534                               struct ttm_resource *new_mem)
535 {
536         struct ttm_bo_device *bdev = bo->bdev;
537         struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
538         struct ttm_resource *old_mem = &bo->mem;
539         int ret;
540         struct ttm_buffer_object *ghost_obj;
541
542         dma_resv_add_excl_fence(bo->base.resv, fence);
543         if (evict) {
544                 ret = ttm_bo_wait(bo, false, false);
545                 if (ret)
546                         return ret;
547
548                 if (!man->use_tt)
549                         ttm_bo_tt_destroy(bo);
550                 ttm_bo_free_old_node(bo);
551         } else {
552                 /**
553                  * This should help pipeline ordinary buffer moves.
554                  *
555                  * Hang old buffer memory on a new buffer object,
556                  * and leave it to be released when the GPU
557                  * operation has completed.
558                  */
559
560                 dma_fence_put(bo->moving);
561                 bo->moving = dma_fence_get(fence);
562
563                 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
564                 if (ret)
565                         return ret;
566
567                 dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
568
569                 /**
570                  * If we're not moving to fixed memory, the TTM object
571                  * needs to stay alive. Otherwise hang it on the ghost
572                  * bo to be unbound and destroyed.
573                  */
574
575                 if (man->use_tt)
576                         ghost_obj->ttm = NULL;
577                 else
578                         bo->ttm = NULL;
579
580                 dma_resv_unlock(&ghost_obj->base._resv);
581                 ttm_bo_put(ghost_obj);
582         }
583
584         *old_mem = *new_mem;
585         new_mem->mm_node = NULL;
586
587         return 0;
588 }
589 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
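/*
 * Usage sketch (hypothetical driver code): after scheduling a copy on a DMA
 * engine, hand the resulting struct dma_fence to TTM so the old node is
 * freed once the copy has completed.  "my_schedule_copy" is a placeholder
 * for driver-specific code:
 *
 *         fence = my_schedule_copy(bo, new_mem);
 *         return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 */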
590
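/*
 * Like ttm_bo_move_accel_cleanup(), but evictions from fixed (non-TT) memory
 * are pipelined too: instead of waiting for @fence, it is attached to the
 * source manager's move fence and the old node is freed right away.
 */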
591 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
592                          struct dma_fence *fence, bool evict,
593                          struct ttm_resource *new_mem)
594 {
595         struct ttm_bo_device *bdev = bo->bdev;
596         struct ttm_resource *old_mem = &bo->mem;
597
598         struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
599         struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
600
601         int ret;
602
603         dma_resv_add_excl_fence(bo->base.resv, fence);
604
605         if (!evict) {
606                 struct ttm_buffer_object *ghost_obj;
607
608                 /**
609                  * This should help pipeline ordinary buffer moves.
610                  *
611                  * Hang old buffer memory on a new buffer object,
612                  * and leave it to be released when the GPU
613                  * operation has completed.
614                  */
615
616                 dma_fence_put(bo->moving);
617                 bo->moving = dma_fence_get(fence);
618
619                 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
620                 if (ret)
621                         return ret;
622
623                 dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
624
625                 /**
626                  * If we're not moving to fixed memory, the TTM object
627                  * needs to stay alive. Otherwise hang it on the ghost
628                  * bo to be unbound and destroyed.
629                  */
630
631                 if (to->use_tt)
632                         ghost_obj->ttm = NULL;
633                 else
634                         bo->ttm = NULL;
635
636                 dma_resv_unlock(&ghost_obj->base._resv);
637                 ttm_bo_put(ghost_obj);
638
639         } else if (!from->use_tt) {
640
641                 /**
642                  * BO doesn't have a TTM that we need to bind/unbind. Just
643                  * remember this eviction and free up the allocation.
644                  */
645
646                 spin_lock(&from->move_lock);
647                 if (!from->move || dma_fence_is_later(fence, from->move)) {
648                         dma_fence_put(from->move);
649                         from->move = dma_fence_get(fence);
650                 }
651                 spin_unlock(&from->move_lock);
652
653                 ttm_bo_free_old_node(bo);
654
655                 dma_fence_put(bo->moving);
656                 bo->moving = dma_fence_get(fence);
657
658         } else {
659                 /**
660                  * Last resort, wait for the move to be completed.
661                  *
662                  * Should never happen in practice.
663                  */
664
665                 ret = ttm_bo_wait(bo, false, false);
666                 if (ret)
667                         return ret;
668
669                 if (!to->use_tt)
670                         ttm_bo_tt_destroy(bo);
671                 ttm_bo_free_old_node(bo);
672         }
673
674         *old_mem = *new_mem;
675         new_mem->mm_node = NULL;
676
677         return 0;
678 }
679 EXPORT_SYMBOL(ttm_bo_pipeline_move);
680
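/*
 * Turn @bo into an empty, system-placement object: its fences are handed
 * over to a ghost object and its resource and TTM are dropped.
 */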
681 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
682 {
683         struct ttm_buffer_object *ghost;
684         int ret;
685
686         ret = ttm_buffer_object_transfer(bo, &ghost);
687         if (ret)
688                 return ret;
689
690         ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
691         /* Last resort, wait for the BO to be idle when we are OOM */
692         if (ret)
693                 ttm_bo_wait(bo, false, false);
694
695         memset(&bo->mem, 0, sizeof(bo->mem));
696         bo->mem.mem_type = TTM_PL_SYSTEM;
697         bo->ttm = NULL;
698
699         dma_resv_unlock(&ghost->base._resv);
700         ttm_bo_put(ghost);
701
702         return 0;
703 }