vma->pages = NULL;
}
-static int vma_bind(struct i915_vma *vma,
+static int vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+ return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
}
-static void vma_unbind(struct i915_vma *vma)
+static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->vma_ops.unbind_vma(vma);
+ vm->vma_ops.unbind_vma(vm, vma);
}
static const struct i915_vma_ops proxy_vma_ops = {
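Every bind_vma/unbind_vma callback now takes the owning i915_address_space as an explicit first argument instead of chasing it through vma->vm; the wrappers above simply forward to the vm's own vma_ops under the new signature. A minimal standalone sketch of the convention, using hypothetical toy_* stand-ins rather than the real i915 types:

    /* Illustrative only: toy_* names are invented, not i915 API. */
    #include <stddef.h>

    struct toy_vm { int id; };              /* i915_address_space stand-in */
    struct toy_vma { struct toy_vm *vm; };  /* i915_vma stand-in */

    struct toy_vma_ops {
            /* before: int (*bind)(struct toy_vma *vma); vm was implied by vma->vm */
            int (*bind)(struct toy_vm *vm, struct toy_vma *vma);
    };

    static int toy_bind(struct toy_vm *vm, struct toy_vma *vma)
    {
            (void)vma;
            return vm ? 0 : -1;     /* operate on the vm the caller named */
    }

    int main(void)
    {
            struct toy_vm ggtt = { .id = 1 }, alias = { .id = 2 };
            struct toy_vma vma = { .vm = &ggtt };
            struct toy_vma_ops ops = { .bind = toy_bind };

            /* the same vma can be bound into a second address space
             * without mutating vma->vm */
            return ops.bind(vma.vm, &vma) | ops.bind(&alias, &vma);
    }

Naming the address space at the call site is what lets one vma be bound into a second vm without touching vma->vm, which is exactly what the aliasing GGTT does further down.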
vma->pages = NULL;
}
-static int pd_vma_bind(struct i915_vma *vma,
+static int pd_vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
return 0;
}
-static void pd_vma_unbind(struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
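pd_vma_bind()/pd_vma_unbind() for the gen6 page directory get the same treatment; they pull their gen6_ppgtt from vma->private, so only the i915_vm_to_ggtt() lookup needed to move from vma->vm to the explicit vm.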
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
-static int ggtt_bind_vma(struct i915_vma *vma,
+static int ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
return 0;
}
-static void ggtt_unbind_vma(struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
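For the plain GGTT paths the conversion is mechanical: insert_entries() and clear_range() are invoked on the vm parameter rather than on vma->vm, so ggtt_bind_vma()/ggtt_unbind_vma() no longer depend on which vma carried them there, only on which address space they operate on.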
return ret;
}
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
+ ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
+ if (ret)
+ return ret;
}
if (flags & I915_VMA_GLOBAL_BIND)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
return 0;
}
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
-
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space *vm =
- &i915_vm_to_ggtt(vma->vm)->alias->vm;
- vm->clear_range(vm, vma->node.start, vma->size);
- }
+ if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
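This is the payoff of exporting the PPGTT helpers: aliasing_gtt_bind_vma() previously open-coded the alias PPGTT's VA-range allocation, I915_VMA_ALLOC_BIT bookkeeping and PTE insertion. It now hands the whole I915_VMA_LOCAL_BIND half to ppgtt_bind_vma() on the alias address space and keeps only the I915_VMA_GLOBAL_BIND half for itself, and unbind becomes symmetric by delegating to ppgtt_unbind_vma().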
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
+ int (*bind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
/*
* Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page.
*/
- void (*unbind_vma)(struct i915_vma *vma);
+ void (*unbind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma);
int (*set_pages)(struct i915_vma *vma);
void (*clear_pages)(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma);
+
void gtt_write_workarounds(struct intel_gt *gt);
void setup_private_pat(struct intel_uncore *uncore);
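These new prototypes in the shared header are what allow the aliasing GGTT to call into the PPGTT implementation; the functions themselves lose their static qualifier in the next hunk.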
return ppgtt;
}
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
int err;
- if (flags & I915_VMA_ALLOC) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
+ if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ err = vm->allocate_va_range(vm, vma->node.start, vma->size);
if (err)
return err;
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
return 0;
}
-static void ppgtt_unbind_vma(struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
int ppgtt_set_pages(struct i915_vma *vma)
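The larger behavioural change hides here: instead of the caller requesting allocation via the I915_VMA_ALLOC flag, ppgtt_bind_vma() itself checks I915_VMA_ALLOC_BIT on the vma, allocates the VA range only on the first bind and records it with set_bit() (unchanged context not shown in the hunk), while ppgtt_unbind_vma() clears the range only if test_and_clear_bit() observes the allocation. A compilable toy model of this once-only pattern, using a plain flags word and made-up toy_* names where the kernel uses atomic bitops on __i915_vma_flags(vma):

    #include <assert.h>

    #define TOY_ALLOC_BIT (1UL << 12)

    struct toy_vma {
            unsigned long flags;
            int va_allocated;       /* stands in for the allocated VA range */
    };

    static void toy_bind(struct toy_vma *vma)
    {
            if (!(vma->flags & TOY_ALLOC_BIT)) {    /* !test_bit() */
                    vma->va_allocated++;            /* allocate_va_range() */
                    vma->flags |= TOY_ALLOC_BIT;    /* set_bit() */
            }
            /* insert_entries() would follow unconditionally */
    }

    static void toy_unbind(struct toy_vma *vma)
    {
            if (vma->flags & TOY_ALLOC_BIT) {       /* test_and_clear_bit() */
                    vma->flags &= ~TOY_ALLOC_BIT;
                    vma->va_allocated--;            /* clear_range() */
            }
    }

    int main(void)
    {
            struct toy_vma vma = { 0 };

            toy_bind(&vma);
            toy_bind(&vma);                 /* rebind: no second allocation */
            assert(vma.va_allocated == 1);

            toy_unbind(&vma);
            toy_unbind(&vma);               /* double unbind: cleared once */
            assert(vma.va_allocated == 0);
            return 0;
    }

Allocation becomes idempotent across rebinds and a double unbind is harmless, which is why the bind call sites below can stop passing I915_VMA_ALLOC.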
struct i915_vma *vma = vw->vma;
int err;
- err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
work->vma = vma;
work->cache_level = cache_level;
- work->flags = bind_flags | I915_VMA_ALLOC;
+ work->flags = bind_flags;
/*
 * Note we only want to chain up to the migration fence on
 * the pages (not the object itself).
 */
work->pinned = vma->obj;
}
} else {
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
if (ret)
return ret;
}
if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma);
+ vma->ops->unbind_vma(vma->vm, vma);
}
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
#define I915_VMA_ALLOC_BIT 12
-#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
#define I915_VMA_ERROR_BIT 13
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
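I915_VMA_ALLOC_BIT itself survives (bit 12 above), since it now carries the allocation state on the vma; only the caller-facing I915_VMA_ALLOC request flag is left without users, matching the work->flags change earlier.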
{
}
-static int mock_bind_ppgtt(struct i915_vma *vma,
+static int mock_bind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
return 0;
}
-static void mock_unbind_ppgtt(struct i915_vma *vma)
+static void mock_unbind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
return ppgtt;
}
-static int mock_bind_ggtt(struct i915_vma *vma,
+static int mock_bind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
return 0;
}
-static void mock_unbind_ggtt(struct i915_vma *vma)
+static void mock_unbind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
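The mock GTT used by the selftests only needs its stub bind/unbind signatures brought in line with the new prototypes; the bodies stay empty.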