drm/i915: Create stolen memory region from local memory
drivers/gpu/drm/i915/intel_memory_region.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

/*
 * Translate an INTEL_REGION_* index into the memory (class, instance)
 * pair describing that region.
 */
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}
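
/*
 * Hypothetical usage sketch, not part of the driver: a caller that only
 * knows the memory type it wants can resolve the backing region like
 * this. The example_* name is illustrative only; the lookup returns
 * NULL when the device has no such region.
 */
static inline struct intel_memory_region *
example_lookup_stolen(struct drm_i915_private *i915)
{
	return intel_memory_region_by_type(i915, INTEL_MEMORY_STOLEN_LOCAL);
}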

/* Caller must hold mem->mm_lock. */
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		/*
		 * A contiguous allocation must come from a single buddy
		 * block, so round up to a power of two and demand exactly
		 * that order.
		 */
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > mem->mm.size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	/*
	 * Greedily split the request into power-of-two blocks: try the
	 * largest order that does not overshoot the remaining page count,
	 * falling back towards min_order when the allocator cannot
	 * satisfy it.
	 */
	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add_tail(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
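
/*
 * Hypothetical usage sketch, not part of the driver: pull some pages
 * from a region and hand them straight back. SZ_2M and the example_*
 * name are illustrative; real callers keep the block list in an
 * object's backing state rather than on the stack.
 */
static int example_alloc_and_free(struct intel_memory_region *mem)
{
	LIST_HEAD(blocks);
	int err;

	/* Request 2M, honouring the region's minimum page size. */
	err = __intel_memory_region_get_pages_buddy(mem, SZ_2M,
						    I915_ALLOC_MIN_PAGE_SIZE,
						    &blocks);
	if (err)
		return err;

	/* ... map and use the blocks ... */

	__intel_memory_region_put_pages_buddy(mem, &blocks);
	return 0;
}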

struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_free_list(&mem->mm, &mem->reserved);
	i915_buddy_fini(&mem->mm);
}

int intel_memory_region_reserve(struct intel_memory_region *mem,
				u64 offset, u64 size)
{
	int ret;

	mutex_lock(&mem->mm_lock);
	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
	mutex_unlock(&mem->mm_lock);

	return ret;
}
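
/*
 * Hypothetical usage sketch, not part of the driver: carve a range out
 * of a region before normal allocations can see it, e.g. space already
 * claimed by firmware. The offset/size values are illustrative only;
 * reserved blocks are handed back to the allocator from mem->reserved
 * in intel_memory_region_release_buddy().
 */
static int example_reserve_firmware_range(struct intel_memory_region *mem)
{
	return intel_memory_region_reserve(mem, 0, SZ_1M);
}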

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
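
/*
 * Hypothetical sketch, not part of the driver, of the ops contract used
 * above: ->init runs from intel_memory_region_create() and ->release on
 * the final put. The example_* names are illustrative; the real
 * implementations live in the shmem and stolen backends.
 */
static int example_region_init(struct intel_memory_region *mem)
{
	/* Typically brings up the underlying buddy allocator. */
	return intel_memory_region_init_buddy(mem);
}

static void example_region_release(struct intel_memory_region *mem)
{
	intel_memory_region_release_buddy(mem);
}

static const struct intel_memory_region_ops example_region_ops = {
	.init = example_region_init,
	.release = example_region_release,
};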

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		mem->type = type;
		mem->instance = instance;

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif