drm/i915: cleanup the region class/instance encoding
drivers/gpu/drm/i915/intel_memory_region.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

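/* Map each memory region ID to its uAPI class/instance encoding. */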
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN] = {
		.class = INTEL_MEMORY_STOLEN,
		.instance = 0,
	},
};

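/* Return the first registered region whose type matches @mem_type, or NULL. */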
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

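/*
 * Return every block on @blocks to the buddy allocator and reinitialise the
 * list. Returns the total size freed. Caller must hold mem->mm_lock.
 */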
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

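/* Free a list of blocks back to @mem, crediting the freed size to mem->avail. */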
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

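/* Free a single block; the owning region is recovered from block->private. */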
void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

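/*
 * Allocate @size bytes from @mem as a list of buddy blocks. The loop
 * greedily tries the largest power-of-two order that still fits the
 * remaining page count, falling back to smaller orders (down to min_order)
 * when nothing larger is free. I915_ALLOC_CONTIGUOUS rounds the request up
 * so it is satisfied by a single block; I915_ALLOC_MIN_PAGE_SIZE bounds the
 * smallest block by the region's minimum page size.
 */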
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > mem->mm.size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add_tail(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}

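/*
 * As __intel_memory_region_get_pages_buddy(), but returns a single block
 * detached from the list.
 */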
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

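/* Set up the buddy allocator over the region's range, with PAGE_SIZE chunks. */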
int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

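/* Free any reserved ranges and tear down the buddy allocator. */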
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_free_list(&mem->mm, &mem->reserved);
	i915_buddy_fini(&mem->mm);
}

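/*
 * Carve [offset, offset + size) out of the region so that normal
 * allocations cannot use it.
 */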
int intel_memory_region_reserve(struct intel_memory_region *mem,
				u64 offset, u64 size)
{
	int ret;

	mutex_lock(&mem->mm_lock);
	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
	mutex_unlock(&mem->mm_lock);

	return ret;
}

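/*
 * Allocate and initialise a region descriptor. On success the caller holds
 * the sole reference, which is dropped via intel_memory_region_put().
 */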
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

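/* Final kref release: let the backend clean up, then free the descriptor. */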
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

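/*
 * Instantiate every region the hardware advertises, translating the region
 * ID into its uAPI class/instance pair via intel_region_map. Region types
 * without a setup hook here (e.g. device local memory) are skipped.
 */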
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		mem->type = type;
		mem->instance = instance;

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

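/* Drop the probe-time reference on every registered region. */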
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif