drm/ttm: Don't print error message if eviction was interrupted
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c3f4b33..459f1b4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
 #include <linux/jiffies.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -118,8 +120,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        bool old_use_tt, new_use_tt;
        int ret;
 
-       old_use_tt = bo->resource &&
-               ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+       old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
        new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
 
        ttm_bo_unmap_virtual(bo);
@@ -280,14 +281,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                ret = 0;
        }
 
-       if (ret || unlikely(list_empty(&bo->ddestroy))) {
+       if (ret) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
                return ret;
        }
 
-       list_del_init(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);
 
@@ -300,47 +300,21 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 }
 
 /*
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
+ * Block for the dma_resv object to become idle, lock the buffer and clean up
+ * the resource and tt object.
  */
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
+static void ttm_bo_delayed_delete(struct work_struct *work)
 {
-       struct list_head removed;
-       bool empty;
-
-       INIT_LIST_HEAD(&removed);
-
-       spin_lock(&bdev->lru_lock);
-       while (!list_empty(&bdev->ddestroy)) {
-               struct ttm_buffer_object *bo;
-
-               bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
-                                     ddestroy);
-               list_move_tail(&bo->ddestroy, &removed);
-               if (!ttm_bo_get_unless_zero(bo))
-                       continue;
-
-               if (remove_all || bo->base.resv != &bo->base._resv) {
-                       spin_unlock(&bdev->lru_lock);
-                       dma_resv_lock(bo->base.resv, NULL);
-
-                       spin_lock(&bdev->lru_lock);
-                       ttm_bo_cleanup_refs(bo, false, !remove_all, true);
-
-               } else if (dma_resv_trylock(bo->base.resv)) {
-                       ttm_bo_cleanup_refs(bo, false, !remove_all, true);
-               } else {
-                       spin_unlock(&bdev->lru_lock);
-               }
+       struct ttm_buffer_object *bo;
 
-               ttm_bo_put(bo);
-               spin_lock(&bdev->lru_lock);
-       }
-       list_splice_tail(&removed, &bdev->ddestroy);
-       empty = list_empty(&bdev->ddestroy);
-       spin_unlock(&bdev->lru_lock);
+       bo = container_of(work, typeof(*bo), delayed_delete);
 
-       return empty;
+       dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+                             MAX_SCHEDULE_TIMEOUT);
+       dma_resv_lock(bo->base.resv, NULL);
+       ttm_bo_cleanup_memtype_use(bo);
+       dma_resv_unlock(bo->base.resv);
+       ttm_bo_put(bo);
 }
 
 static void ttm_bo_release(struct kref *kref)
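The worker above only covers the BO side; the device side, which is not part of this excerpt (it lives in ttm_device.c and the TTM headers), has to provide the bdev->wq workqueue that queue_work() targets below and a struct work_struct delayed_delete member in struct ttm_buffer_object. A minimal sketch of the device-side allocation, with the flags and max_active chosen purely for illustration:

	/* Sketch, assuming bdev->wq is a struct workqueue_struct * as implied by
	 * the queue_work() call below; WQ_MEM_RECLAIM because buffer frees must
	 * be able to make progress under memory pressure.
	 */
	static int example_init_release_wq(struct ttm_device *bdev)
	{
		bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM, 16);
		return bdev->wq ? 0 : -ENOMEM;
	}
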
@@ -369,69 +343,58 @@ static void ttm_bo_release(struct kref *kref)
 
                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, bo->resource);
-       }
-
-       if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
-           !dma_resv_trylock(bo->base.resv)) {
-               /* The BO is not idle, resurrect it for delayed destroy */
-               ttm_bo_flush_all_fences(bo);
-               bo->deleted = true;
 
-               spin_lock(&bo->bdev->lru_lock);
+               if (!dma_resv_test_signaled(bo->base.resv,
+                                           DMA_RESV_USAGE_BOOKKEEP) ||
+                   !dma_resv_trylock(bo->base.resv)) {
+                       /* The BO is not idle, resurrect it for delayed destroy */
+                       ttm_bo_flush_all_fences(bo);
+                       bo->deleted = true;
 
-               /*
-                * Make pinned bos immediately available to
-                * shrinkers, now that they are queued for
-                * destruction.
-                *
-                * FIXME: QXL is triggering this. Can be removed when the
-                * driver is fixed.
-                */
-               if (bo->pin_count) {
-                       bo->pin_count = 0;
-                       ttm_resource_move_to_lru_tail(bo->resource);
-               }
+                       spin_lock(&bo->bdev->lru_lock);
 
-               kref_init(&bo->kref);
-               list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-               spin_unlock(&bo->bdev->lru_lock);
+                       /*
+                        * Make pinned bos immediately available to
+                        * shrinkers, now that they are queued for
+                        * destruction.
+                        *
+                        * FIXME: QXL is triggering this. Can be removed when the
+                        * driver is fixed.
+                        */
+                       if (bo->pin_count) {
+                               bo->pin_count = 0;
+                               ttm_resource_move_to_lru_tail(bo->resource);
+                       }
 
-               schedule_delayed_work(&bdev->wq,
-                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
-               return;
-       }
+                       kref_init(&bo->kref);
+                       spin_unlock(&bo->bdev->lru_lock);
 
-       spin_lock(&bo->bdev->lru_lock);
-       list_del(&bo->ddestroy);
-       spin_unlock(&bo->bdev->lru_lock);
+                       INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
+                       queue_work(bdev->wq, &bo->delayed_delete);
+                       return;
+               }
 
-       ttm_bo_cleanup_memtype_use(bo);
-       dma_resv_unlock(bo->base.resv);
+               ttm_bo_cleanup_memtype_use(bo);
+               dma_resv_unlock(bo->base.resv);
+       }
 
        atomic_dec(&ttm_glob.bo_count);
        bo->destroy(bo);
 }
 
+/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
 void ttm_bo_put(struct ttm_buffer_object *bo)
 {
        kref_put(&bo->kref, ttm_bo_release);
 }
 EXPORT_SYMBOL(ttm_bo_put);
 
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
-{
-       return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
-{
-       if (resched)
-               schedule_delayed_work(&bdev->wq,
-                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource **mem,
                                     struct ttm_operation_ctx *ctx,
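With every BO now owning its delayed_delete work item on bdev->wq, the ttm_bo_lock_delayed_workqueue()/ttm_bo_unlock_delayed_workqueue() helpers removed above no longer have anything to pause. A sketch of how a caller that used them around suspend or unload can be converted, assuming it only needs already-queued releases to complete (the helper name is invented):

	/* Sketch: wait for all pending ttm_bo_delayed_delete() work items. */
	static void example_drain_pending_releases(struct ttm_device *bdev)
	{
		flush_workqueue(bdev->wq);
	}
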
@@ -475,7 +438,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
        bdev->funcs->evict_flags(bo, &placement);
 
        if (!placement.num_placement && !placement.num_busy_placement) {
-               ret = ttm_bo_wait(bo, true, false);
+               ret = ttm_bo_wait_ctx(bo, ctx);
                if (ret)
                        return ret;
 
@@ -501,7 +464,8 @@ bounce:
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
                if (ret) {
-                       pr_err("Buffer eviction failed\n");
+                       if (ret != -ERESTARTSYS && ret != -EINTR)
+                               pr_err("Buffer eviction failed\n");
                        ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
@@ -512,6 +476,14 @@ out:
        return ret;
 }
 
+/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
 {
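Drivers can override this default check through the eviction_valuable callback in struct ttm_device_funcs and fall back to the generic helper. A rough sketch under that assumption, with an invented driver prefix and a deliberately simple criterion:

	/* Sketch: refuse to evict kernel-internal BOs, otherwise defer to TTM. */
	static bool foo_bo_eviction_valuable(struct ttm_buffer_object *bo,
					     const struct ttm_place *place)
	{
		if (bo->type == ttm_bo_type_kernel)
			return false;

		return ttm_bo_eviction_valuable(bo, place);
	}

The hook would then be wired up as .eviction_valuable = foo_bo_eviction_valuable in the driver's struct ttm_device_funcs.
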
@@ -771,13 +743,23 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
 }
 
-/*
- * Creates space for memory region @mem according to its type.
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
  *
- * This function first searches for free space in compatible memory types in
- * the priority order defined by the driver.  If free space isn't found, then
- * ttm_bo_mem_force_space is attempted in priority order to evict and find
- * space.
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @placement, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if @ctx->no_wait_gpu is true).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
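Per the return codes documented above, a caller typically pairs ttm_bo_mem_space() with a ttm_operation_ctx describing how it is allowed to sleep. A condensed sketch, assuming the BO is already reserved and the placement has been prepared by the driver (names are illustrative, error handling trimmed):

	/* Sketch: allocate backing space for an already-reserved BO. */
	static int foo_alloc_space(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
	{
		struct ttm_operation_ctx ctx = { .interruptible = true };
		struct ttm_resource *mem;
		int ret;

		ret = ttm_bo_mem_space(bo, placement, &mem, &ctx);
		if (ret)
			return ret;	/* -EBUSY, -ENOMEM or -ERESTARTSYS, see above */

		/* ... move the buffer into @mem, or drop it with ttm_resource_free() ... */
		return 0;
	}
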
@@ -883,6 +865,21 @@ out:
        return ret;
 }
 
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @ctx: validation parameters.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
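For reference, the usual calling pattern around ttm_bo_validate() matches the return codes documented above: reserve the BO, validate it into the requested domain, then unreserve it. A minimal sketch with a single system-domain placement (function name is illustrative):

	/* Sketch: validate a BO into the system domain. */
	static int foo_move_to_system(struct ttm_buffer_object *bo)
	{
		struct ttm_place place = { .mem_type = TTM_PL_SYSTEM };
		struct ttm_placement placement = {
			.num_placement = 1,
			.placement = &place,
			.num_busy_placement = 1,
			.busy_placement = &place,
		};
		struct ttm_operation_ctx ctx = { .interruptible = true };
		int ret;

		ret = dma_resv_lock(bo->base.resv, NULL);
		if (ret)
			return ret;

		ret = ttm_bo_validate(bo, &placement, &ctx);
		dma_resv_unlock(bo->base.resv);
		return ret;	/* 0, -EINVAL, -ENOMEM, -EBUSY or -ERESTARTSYS */
	}
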
@@ -897,14 +894,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        if (!placement->num_placement && !placement->num_busy_placement)
                return ttm_bo_pipeline_gutting(bo);
 
-       /*
-        * Check whether we need to move buffer.
-        */
-       if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
-               ret = ttm_bo_move_buffer(bo, placement, ctx);
-               if (ret)
-                       return ret;
-       }
+       /* Check whether we need to move buffer. */
+       if (bo->resource && ttm_resource_compat(bo->resource, placement))
+               return 0;
+
+       /* Moving of pinned BOs is forbidden */
+       if (bo->pin_count)
+               return -EINVAL;
+
+       ret = ttm_bo_move_buffer(bo, placement, ctx);
+       if (ret)
+               return ret;
+
        /*
         * We might need to add a TTM.
         */
@@ -956,11 +957,9 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                         struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
 {
-       static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        int ret;
 
        kref_init(&bo->kref);
-       INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
        bo->page_alignment = alignment;
@@ -974,12 +973,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                bo->base.resv = &bo->base._resv;
        atomic_inc(&ttm_glob.bo_count);
 
-       ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
-       if (unlikely(ret)) {
-               ttm_bo_put(bo);
-               return ret;
-       }
-
        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
@@ -1076,6 +1069,11 @@ EXPORT_SYMBOL(ttm_bo_init_validate);
  * buffer object vm functions.
  */
 
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: buffer object whose CPU-side virtual mappings are torn down
+ */
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
        struct ttm_device *bdev = bo->bdev;
@@ -1085,36 +1083,44 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-int ttm_bo_wait(struct ttm_buffer_object *bo,
-               bool interruptible, bool no_wait)
+/**
+ * ttm_bo_wait_ctx - wait for buffer idle.
+ *
+ * @bo:  The buffer object.
+ * @ctx: defines how to wait
+ *
+ * Waits for the buffer to be idle. The timeout used depends on the context.
+ * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a signal, or
+ * zero on success.
+ */
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
 {
-       long timeout = 15 * HZ;
+       long ret;
 
-       if (no_wait) {
-               if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
+       if (ctx->no_wait_gpu) {
+               if (dma_resv_test_signaled(bo->base.resv,
+                                          DMA_RESV_USAGE_BOOKKEEP))
                        return 0;
                else
                        return -EBUSY;
        }
 
-       timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
-                                       interruptible, timeout);
-       if (timeout < 0)
-               return timeout;
-
-       if (timeout == 0)
+       ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+                                   ctx->interruptible, 15 * HZ);
+       if (unlikely(ret < 0))
+               return ret;
+       if (unlikely(ret == 0))
                return -EBUSY;
-
        return 0;
 }
-EXPORT_SYMBOL(ttm_bo_wait);
+EXPORT_SYMBOL(ttm_bo_wait_ctx);
 
 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
 {
        struct ttm_place place;
        bool locked;
-       int ret;
+       long ret;
 
        /*
         * While the bo may already reside in SYSTEM placement, set
@@ -1169,7 +1175,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        /*
         * Make sure BO is idle.
         */
-       ret = ttm_bo_wait(bo, false, false);
+       ret = ttm_bo_wait_ctx(bo, ctx);
        if (unlikely(ret != 0))
                goto out;
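Taken together with the swapout conversion above, wait behaviour is now expressed entirely through the ttm_operation_ctx rather than through separate interruptible/no_wait arguments. A small sketch of the two modes, using only fields that appear in this patch:

	/* Sketch: non-blocking idle check, then an interruptible wait. */
	static int example_wait_idle(struct ttm_buffer_object *bo)
	{
		struct ttm_operation_ctx nowait = { .no_wait_gpu = true };
		struct ttm_operation_ctx wait = { .interruptible = true };

		if (ttm_bo_wait_ctx(bo, &nowait) == 0)
			return 0;		/* already idle */

		return ttm_bo_wait_ctx(bo, &wait);	/* 0, -EBUSY or -ERESTARTSYS */
	}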