1 // SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
9 #include "i915_trace.h"
11 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
12 struct intel_memory_region *mem)
14 obj->mm.region = intel_memory_region_get(mem);
16 mutex_lock(&mem->objects.lock);
17 list_add(&obj->mm.region_link, &mem->objects.list);
18 mutex_unlock(&mem->objects.lock);
21 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
23 struct intel_memory_region *mem = obj->mm.region;
25 mutex_lock(&mem->objects.lock);
26 list_del(&obj->mm.region_link);
27 mutex_unlock(&mem->objects.lock);
29 intel_memory_region_put(mem);
/**
 * i915_gem_object_create_region - Allocate a GEM object backed by a region
 * @mem: The memory region to allocate from
 * @size: Requested size in bytes; rounded up to the effective page size
 * @page_size: Explicit backing page size, or 0 to use @mem->min_page_size
 * @flags: I915_BO_ALLOC_* flags
 *
 * Return: The new object on success, or an ERR_PTR(): -ENODEV if @mem is
 * NULL, -E2BIG if the rounded size is too large, -ENOMEM if object
 * allocation fails, or the error returned by the region's init_object hook.
 */
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (!mem)
		return ERR_PTR(-ENODEV);

	/* A caller-supplied page size overrides the region minimum. */
	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	/* Reject sizes the object bookkeeping cannot represent. */
	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* Region-specific backing-store setup. */
	err = mem->ops->init_object(mem, obj, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}
/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the regions object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so that if run
 * concurrently with itself may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	while (!ret) {
		struct i915_gem_ww_ctx ww;

		/* Always take the list head: visited objects were moved off. */
		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		/* Park on @still_in_list so we never revisit this object. */
		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			/* Skip objects that migrated away while unlocked. */
			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}
		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
	}

	/* Restore every parked object to the region list. */
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}