Merge tag 'drm-misc-next-2021-10-14' of git://anongit.freedesktop.org/drm/drm-misc...
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / gem / i915_gem_region.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
8 #include "i915_drv.h"
9 #include "i915_trace.h"
10
/*
 * Attach @obj to memory region @mem: take a reference on the region and
 * link the object onto the region's object list.
 *
 * The region reference is dropped, and the object unlinked, by
 * i915_gem_object_release_memory_region().
 */
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
                                        struct intel_memory_region *mem)
{
	/* Published before linking so list walkers never see a NULL region. */
	obj->mm.region = intel_memory_region_get(mem);

	/* objects.lock protects the region's object list. */
	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}
20
/*
 * Detach @obj from its memory region: unlink it from the region's object
 * list and drop the region reference taken by
 * i915_gem_object_init_memory_region().
 *
 * The unlink happens under the region's objects.lock, and the reference is
 * dropped only afterwards, so the region cannot go away while the object
 * is still on its list.
 */
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);

	intel_memory_region_put(mem);
}
31
32 struct drm_i915_gem_object *
33 i915_gem_object_create_region(struct intel_memory_region *mem,
34                               resource_size_t size,
35                               resource_size_t page_size,
36                               unsigned int flags)
37 {
38         struct drm_i915_gem_object *obj;
39         resource_size_t default_page_size;
40         int err;
41
42         /*
43          * NB: Our use of resource_size_t for the size stems from using struct
44          * resource for the mem->region. We might need to revisit this in the
45          * future.
46          */
47
48         GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
49
50         if (!mem)
51                 return ERR_PTR(-ENODEV);
52
53         default_page_size = mem->min_page_size;
54         if (page_size)
55                 default_page_size = page_size;
56
57         GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
58         GEM_BUG_ON(default_page_size < PAGE_SIZE);
59
60         size = round_up(size, default_page_size);
61
62         GEM_BUG_ON(!size);
63         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
64
65         if (i915_gem_object_size_2big(size))
66                 return ERR_PTR(-E2BIG);
67
68         obj = i915_gem_object_alloc();
69         if (!obj)
70                 return ERR_PTR(-ENOMEM);
71
72         err = mem->ops->init_object(mem, obj, size, page_size, flags);
73         if (err)
74                 goto err_object_free;
75
76         trace_i915_gem_object_create(obj);
77         return obj;
78
79 err_object_free:
80         i915_gem_object_free(obj);
81         return ERR_PTR(err);
82 }
83
/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the regions object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so that if run
 * concurrently with itself may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		/*
		 * Always restart from the list head: objects already
		 * visited have been parked on still_in_list, so the head
		 * is always the next unprocessed object.
		 */
		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		/*
		 * Park the object so it isn't visited twice, then try to
		 * pin it; a zero refcount means it is being destroyed, so
		 * skip it.
		 */
		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		/* ww transaction loop: retries on -EDEADLK from the lock. */
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			/*
			 * Re-check the region under the object lock: the
			 * object may have migrated to another region while
			 * the region mutex was dropped above.
			 */
			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	/* Splice all parked objects back onto the region list. */
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	/*
	 * NOTE(review): apply->ww is left pointing at the now-stale
	 * on-stack ww context on return; the GEM_WARN_ON above would fire
	 * on reuse of @apply — confirm callers treat @apply as single-use.
	 */
	return ret;
}