Merge branch 'for-4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 92003ea..8a029ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-       write_lock(&dev_priv->resource_lock);
-       res->avail = false;
+       spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;
 
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
        else
                kfree(res);
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
        BUG_ON(res->id != -1);
 
        idr_preload(GFP_KERNEL);
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
 
        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;
 
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
 }
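
Aside: the lock conversion above works for vmw_resource_alloc_id() because the sleeping part of the ID allocation is hoisted into idr_preload(), leaving only a GFP_NOWAIT idr_alloc() under the spinlock. A minimal sketch of that standard idiom, with illustrative names not taken from the driver:

/* Illustrative only: allocate an IDR id without sleeping under a spinlock. */
static int example_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; preallocates idr nodes */
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 1, 0, GFP_NOWAIT);	/* safe under the lock */
	spin_unlock(lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}
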
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
-       res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                return vmw_resource_alloc_id(res);
 }
 
-/**
- * vmw_resource_activate
- *
- * @res:        Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
-                          void (*hw_destroy) (struct vmw_resource *))
-{
-       struct vmw_private *dev_priv = res->dev_priv;
-
-       write_lock(&dev_priv->resource_lock);
-       res->avail = true;
-       res->hw_destroy = hw_destroy;
-       write_unlock(&dev_priv->resource_lock);
-}
 
 /**
  * vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                goto out_bad_resource;
 
        res = converter->base_obj_to_res(base);
-
-       read_lock(&dev_priv->resource_lock);
-       if (!res->avail || res->res_free != converter->res_free) {
-               read_unlock(&dev_priv->resource_lock);
-               goto out_bad_resource;
-       }
-
        kref_get(&res->kref);
-       read_unlock(&dev_priv->resource_lock);
 
        *p_res = res;
        ret = 0;
@@ -262,6 +230,41 @@ out_bad_resource:
        return ret;
 }
 
+/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle without acquiring a reference, and perform basic
+ * type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ *
+ * Return: An unreferenced pointer to the struct vmw_resource on success,
+ * ERR_PTR(-ESRCH) if the handle can't be found, or ERR_PTR(-EINVAL) if the
+ * handle is associated with an incorrect resource type.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+                                     struct ttm_object_file *tfile,
+                                     uint32_t handle,
+                                     const struct vmw_user_resource_conv
+                                     *converter)
+{
+       struct ttm_base_object *base;
+
+       base = ttm_base_object_noref_lookup(tfile, handle);
+       if (!base)
+               return ERR_PTR(-ESRCH);
+
+       if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+               ttm_base_object_noref_release();
+               return ERR_PTR(-EINVAL);
+       }
+
+       return converter->base_obj_to_res(base);
+}
+
 /**
  * Helper function that looks up either a surface or a bo.
  *
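
Aside: the new noref lookup hands back a resource pointer without taking a reference; the error path above pairs the lookup with ttm_base_object_noref_release(), and a successful caller presumably has to do the same once it has either finished with the pointer or taken its own reference. A hedged sketch of such a caller (vmw_example_lookup() and the reference-before-release ordering are assumptions, not code from this patch):

/* Hypothetical caller: convert a noref lookup into a normal reference. */
static struct vmw_resource *
vmw_example_lookup(struct vmw_private *dev_priv,
		   struct ttm_object_file *tfile, uint32_t handle,
		   const struct vmw_user_resource_conv *converter)
{
	struct vmw_resource *res;

	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
						    converter);
	if (IS_ERR(res))
		return res;

	res = vmw_resource_reference(res);	/* take our own reference */
	ttm_base_object_noref_release();	/* end the noref lookup */

	return res;
}
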
@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 /**
@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
 
-       write_lock(&dev_priv->resource_lock);
+       spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 
        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
@@ -587,15 +590,18 @@ out_no_unbind:
 /**
  * vmw_resource_validate - Make a resource up-to-date and visible
  *                         to the device.
- *
- * @res:            The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptibly if possible.
  *
  * On successful return, any backup DMA buffer pointed to by @res->backup will
  * be reserved and validated.
  * On hardware resource shortage, this function will repeatedly evict
  * resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
  */
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
 {
        int ret;
        struct vmw_resource *evict_res;
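
Aside: with the new @intr argument the caller chooses whether eviction waits may be interrupted by signals; vmw_resource_pin() further down simply forwards its own interruptible flag. A minimal, hypothetical caller sketch:

/* Hypothetical caller: validate with interruptible waits. */
static int example_validate(struct vmw_resource *res)
{
	int ret = vmw_resource_validate(res, true);

	if (ret == -ERESTARTSYS)
		return ret;	/* signal received; let the ioctl be restarted */
	if (ret)
		DRM_ERROR("Failed to validate resource.\n");

	return ret;
}
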
@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
                if (likely(ret != -EBUSY))
                        break;
 
-               write_lock(&dev_priv->resource_lock);
+               spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        break;
                }
 
@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
                                          lru_head));
                list_del_init(&evict_res->lru_head);
 
-               write_unlock(&dev_priv->resource_lock);
+               spin_unlock(&dev_priv->resource_lock);
 
                /* Trylock backup buffers with a NULL ticket. */
-               ret = vmw_resource_do_evict(NULL, evict_res, true);
+               ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
-                       write_lock(&dev_priv->resource_lock);
+                       spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
        struct ww_acquire_ctx ticket;
 
        do {
-               write_lock(&dev_priv->resource_lock);
+               spin_lock(&dev_priv->resource_lock);
 
                if (list_empty(lru_list))
                        goto out_unlock;
@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
-               write_unlock(&dev_priv->resource_lock);
+               spin_unlock(&dev_priv->resource_lock);
 
                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
-                       write_lock(&dev_priv->resource_lock);
+                       spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
-                       write_unlock(&dev_priv->resource_lock);
+                       spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
        } while (1);
 
 out_unlock:
-       write_unlock(&dev_priv->resource_lock);
+       spin_unlock(&dev_priv->resource_lock);
 }
 
 /**
@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
-               ret = vmw_resource_validate(res);
+               ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)