+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-
-struct stub_fence {
- struct dma_fence dma;
- struct i915_sw_fence chain;
-};
-
-static int __i915_sw_fence_call
-stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
-
- switch (state) {
- case FENCE_COMPLETE:
- dma_fence_signal(&stub->dma);
- break;
-
- case FENCE_FREE:
- dma_fence_put(&stub->dma);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static const char *stub_driver_name(struct dma_fence *fence)
-{
- return DRIVER_NAME;
-}
-
-static const char *stub_timeline_name(struct dma_fence *fence)
-{
- return "object";
-}
-
-static void stub_release(struct dma_fence *fence)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
- i915_sw_fence_fini(&stub->chain);
-
- BUILD_BUG_ON(offsetof(typeof(*stub), dma));
- dma_fence_free(&stub->dma);
-}
-
-static const struct dma_fence_ops stub_fence_ops = {
- .get_driver_name = stub_driver_name,
- .get_timeline_name = stub_timeline_name,
- .release = stub_release,
-};
-
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
-{
- struct stub_fence *stub;
-
- assert_object_held(obj);
-
- stub = kmalloc(sizeof(*stub), GFP_KERNEL);
- if (!stub)
- return NULL;
-
- i915_sw_fence_init(&stub->chain, stub_notify);
- dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- 0, 0);
-
- if (i915_sw_fence_await_reservation(&stub->chain,
- obj->base.resv, NULL, true,
- i915_fence_timeout(to_i915(obj->base.dev)),
- I915_FENCE_GFP) < 0)
- goto err;
-
- dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
-
- return &stub->dma;
-
-err:
- stub_release(&stub->dma);
- return NULL;
-}
-
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
- i915_sw_fence_commit(&stub->chain);
-}
{
unsigned int needs_clflush;
unsigned int idx, offset;
- struct dma_fence *fence;
char __user *user_data;
u64 remain;
int ret;
if (ret)
return ret;
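+ /*
+  * Pin the backing pages while we still hold the object lock, so they
+  * stay resident for the copy that runs after the lock is dropped below.
+  */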
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
ret = i915_gem_object_prepare_read(obj, &needs_clflush);
- if (ret) {
- i915_gem_object_unlock(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
- if (!fence)
- return -ENOMEM;
-
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
offset = 0;
}
- i915_gem_object_unlock_fence(obj, fence);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
return ret;
}
return unwritten;
}
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pread *args)
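+/*
+ * Prepare a GGTT mapping for pread/pwrite: under a ww transaction, move the
+ * object to the GTT domain, pin it into the mappable aperture (or fall back
+ * to a single-page node we can page through), and pin its backing pages.
+ */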
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+ struct drm_mm_node *node,
+ bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- intel_wakeref_t wakeref;
- struct drm_mm_node node;
- struct dma_fence *fence;
- void __user *user_data;
struct i915_vma *vma;
- u64 remain, offset;
+ struct i915_gem_ww_ctx ww;
int ret;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ i915_gem_ww_ctx_init(&ww, true);
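+ /*
+  * ww transaction: if any step below returns -EDEADLK we back off,
+  * dropping the object lock, and retry from here.
+  */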
+retry:
vma = ERR_PTR(-ENODEV);
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ goto err_ww;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto err_ww;
+
if (!i915_gem_object_is_tiled(obj))
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (!IS_ERR(vma)) {
- node.start = i915_ggtt_offset(vma);
- node.flags = 0;
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (vma == ERR_PTR(-EDEADLK)) {
+ ret = -EDEADLK;
+ goto err_ww;
+ } else if (!IS_ERR(vma)) {
+ node->start = i915_ggtt_offset(vma);
+ node->flags = 0;
} else {
- ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
if (ret)
- goto out_rpm;
- GEM_BUG_ON(!drm_mm_node_allocated(&node));
+ goto err_ww;
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
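+ /* Clear the stale ERR_PTR: we map through the node, not a pinned vma. */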
+ vma = NULL;
}
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
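+ /* Pin the backing pages; on failure, undo the GGTT mapping set up above. */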
+ ret = i915_gem_object_pin_pages(obj);
if (ret) {
- i915_gem_object_unlock(obj);
- goto out_unpin;
+ if (drm_mm_node_allocated(node)) {
+ ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+ remove_mappable_node(ggtt, node);
+ } else {
+ i915_vma_unpin(vma);
+ }
}
- fence = i915_gem_object_lock_fence(obj);
- i915_gem_object_unlock(obj);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unpin;
+err_ww:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return ret ? ERR_PTR(ret) : vma;
+}
+
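+/* Undo i915_gem_gtt_prepare(): unpin the pages and release the GGTT mapping. */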
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+ struct drm_mm_node *node,
+ struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ i915_gem_object_unpin_pages(obj);
+ if (drm_mm_node_allocated(node)) {
+ ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+ remove_mappable_node(ggtt, node);
+ } else {
+ i915_vma_unpin(vma);
+ }
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
+ struct drm_mm_node node;
+ void __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ vma = i915_gem_gtt_prepare(obj, &node, false);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_rpm;
}
user_data = u64_to_user_ptr(args->data_ptr);
offset += page_length;
}
- i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
- if (drm_mm_node_allocated(&node)) {
- ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(ggtt, &node);
- } else {
- i915_vma_unpin(vma);
- }
+ i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
if (ret)
goto out;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto out;
-
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
- i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return ret;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
- struct dma_fence *fence;
struct i915_vma *vma;
u64 remain, offset;
void __user *user_data;
- int ret;
+ int ret = 0;
if (i915_gem_object_has_struct_page(obj)) {
/*
wakeref = intel_runtime_pm_get(rpm);
}
- vma = ERR_PTR(-ENODEV);
- if (!i915_gem_object_is_tiled(obj))
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (!IS_ERR(vma)) {
- node.start = i915_ggtt_offset(vma);
- node.flags = 0;
- } else {
- ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
- if (ret)
- goto out_rpm;
- GEM_BUG_ON(!drm_mm_node_allocated(&node));
- }
-
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret) {
- i915_gem_object_unlock(obj);
- goto out_unpin;
- }
-
- fence = i915_gem_object_lock_fence(obj);
- i915_gem_object_unlock(obj);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unpin;
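+ /* write=true: i915_gem_gtt_prepare() moves the object to the GTT write domain. */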
+ vma = i915_gem_gtt_prepare(obj, &node, true);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_rpm;
}
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
- if (drm_mm_node_allocated(&node)) {
- ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(ggtt, &node);
- } else {
- i915_vma_unpin(vma);
- }
+ i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
return ret;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
- struct dma_fence *fence;
void __user *user_data;
u64 remain;
int ret;
if (ret)
return ret;
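+ /*
+  * As in the pread path: pin the pages under the object lock so the copy
+  * can proceed after the lock is dropped.
+  */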
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
ret = i915_gem_object_prepare_write(obj, &needs_clflush);
- if (ret) {
- i915_gem_object_unlock(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
- if (!fence)
- return -ENOMEM;
-
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
 * overcomplicate things and flush the entire page.
}
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_object_unlock_fence(obj, fence);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
return ret;
}
if (ret)
goto err;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return ret;