// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"
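
/*
 * Map each INTEL_REGION_* identifier to the memory class backing it. The
 * instance is left at zero for every entry.
 */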
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
	},
};
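
/*
 * Look up the first registered region of the requested memory type, or
 * return NULL if the device has none.
 */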
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;
	return NULL;
}
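
/*
 * Hand every block on @blocks back to the buddy allocator and return the
 * number of bytes freed. Called with mem->mm_lock held.
 */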
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);
	return size;
}
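
/*
 * Free a list of buddy blocks and credit the released bytes back to the
 * region's available space.
 */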
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}
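
/*
 * Allocate buddy blocks covering @size bytes from @mem and append them to
 * @blocks. I915_ALLOC_MIN_PAGE_SIZE clamps the allocation order to the
 * region's minimum page size; I915_ALLOC_CONTIGUOUS rounds the request up
 * to a single power-of-two block. Returns 0 on success or a negative errno.
 */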
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > mem->mm.size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;
			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);
		block->private = mem;
		list_add_tail(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
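
/*
 * Allocate @size bytes from @mem and return the block directly rather than
 * on a list; an ERR_PTR is returned on failure.
 */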
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}
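
/* Initialise the buddy allocator for @mem, using PAGE_SIZE sized chunks. */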
int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region), PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_free_list(&mem->mm, &mem->reserved);
	i915_buddy_fini(&mem->mm);
}
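
/*
 * Carve the range [@offset, @offset + @size) out of the buddy allocator and
 * keep it on the region's reserved list so it is never handed out.
 */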
int intel_memory_region_reserve(struct intel_memory_region *mem,
				u64 offset, u64 size)
{
	int ret;

	mutex_lock(&mem->mm_lock);
	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
	mutex_unlock(&mem->mm_lock);
	return ret;
}
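
/*
 * Allocate and initialise a new memory region. The region is returned with
 * a single reference held; @ops->init(), if provided, may probe the backing
 * memory before the region is handed back.
 */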
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);
	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}
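
/*
 * Last-reference callback: let the backend clean up via ops->release()
 * before the region itself is freed.
 */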
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */
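
/*
 * Probe the memory regions advertised for this device, instantiate each one
 * and record it in i915->mm.regions[]. On failure every region created so
 * far is released again.
 */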
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		mem->type = type;
		mem->instance = instance;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
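
/* Drop the driver's reference on every region created at probe time. */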
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif