// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
9 #include "i915_trace.h"
/*
 * i915_gem_object_put_pages_buddy - release an object's region-backed pages.
 * @obj: the GEM object whose backing store is being dropped
 * @pages: the sg_table previously built by the get_pages path
 *
 * Hands every buddy block on @obj->mm.blocks back to the owning memory
 * region, then clears the dirty flag — once the backing store is released
 * there is nothing left to write back.
 *
 * NOTE(review): this extract is garbled — the function's opening brace and
 * the sg_table teardown/closing lines are missing from view; presumably
 * @pages is freed here as well — confirm against the full file.
 */
12 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
13 struct sg_table *pages)
15 __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
17 obj->mm.dirty = false;
/*
 * i915_gem_object_get_pages_buddy - allocate region pages for an object.
 * @obj: the GEM object to back with memory from its region
 *
 * Allocates buddy blocks from @obj->mm.region to cover @obj->base.size,
 * then builds an sg_table describing them, coalescing physically
 * contiguous blocks into single scatterlist entries capped at
 * i915_sg_segment_size().
 *
 * NOTE(review): this extract is garbled — the sg_table error paths, the
 * scatterlist initialisation (st/sg setup) and the tail of the loop are
 * missing from view; the annotations below cover only the visible lines.
 */
23 i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
25 const u64 max_segment = i915_sg_segment_size(); /* per-sg-entry length cap */
26 struct intel_memory_region *mem = obj->mm.region;
27 struct list_head *blocks = &obj->mm.blocks; /* receives the allocated buddy blocks */
28 resource_size_t size = obj->base.size;
29 resource_size_t prev_end;
30 struct i915_buddy_block *block;
33 struct scatterlist *sg;
34 unsigned int sg_page_sizes; /* OR of completed sg entry lengths */
/* sg_table itself is heap allocated; allocation failure handling not visible here. */
37 st = kmalloc(sizeof(*st), GFP_KERNEL);
/* Worst case: one sg entry per page; coalescing below usually needs far fewer. */
41 if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
/* Always honour the region's minimum page size for the buddy allocation. */
46 flags = I915_ALLOC_MIN_PAGE_SIZE;
47 if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
48 flags |= I915_ALLOC_CONTIGUOUS;
50 ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
/* A successful allocation must have produced at least one block. */
54 GEM_BUG_ON(list_empty(blocks));
/* Sentinel: no block offset can equal -1, so the first block never coalesces. */
59 prev_end = (resource_size_t)-1;
61 list_for_each_entry(block, blocks, link) {
62 u64 block_size, offset;
/* A block may be larger than what remains of the object; clamp to size. */
64 block_size = min_t(u64, size,
65 i915_buddy_block_size(&mem->mm, block));
66 offset = i915_buddy_block_offset(block);
/* Discontiguous with the previous block, or current entry full: start a new sg. */
71 if (offset != prev_end || sg->length >= max_segment) {
/* Record the just-completed entry's length in the page-size mask. */
73 sg_page_sizes |= sg->length;
/* DMA address is the region-relative offset rebased onto the region start. */
77 sg_dma_address(sg) = mem->region.start + offset;
/* Extend the current entry, but never past the segment size cap. */
83 len = min(block_size, max_segment - sg->length);
85 sg_dma_len(sg) += len;
/* Account for the final (still open) sg entry. */
94 sg_page_sizes |= sg->length;
98 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
/*
 * i915_gem_object_init_memory_region - bind a new object to a memory region.
 * @obj: the freshly created GEM object
 * @mem: the region that will provide its backing store (reference taken)
 *
 * Takes a reference on @mem, initialises the object's block list, and links
 * the object onto the region's object list (or the purgeable list for
 * volatile objects) under the region's objects lock.
 *
 * NOTE(review): garbled extract — the flags parameter, braces and the
 * `else` line before L48 are missing from view.
 */
108 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
109 struct intel_memory_region *mem,
112 INIT_LIST_HEAD(&obj->mm.blocks);
/* Object holds a region reference until i915_gem_object_release_memory_region(). */
113 obj->mm.region = intel_memory_region_get(mem);
/* An object no bigger than one minimum page is trivially contiguous. */
116 if (obj->base.size <= mem->min_page_size)
117 obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
119 mutex_lock(&mem->objects.lock);
/* Volatile objects go on the purgeable list so the region can reclaim them. */
121 if (obj->flags & I915_BO_ALLOC_VOLATILE)
122 list_add(&obj->mm.region_link, &mem->objects.purgeable);
124 list_add(&obj->mm.region_link, &mem->objects.list);
126 mutex_unlock(&mem->objects.lock);
/*
 * i915_gem_object_release_memory_region - undo init_memory_region.
 * @obj: the GEM object being released
 *
 * Unlinks the object from its region's object list under the objects lock,
 * then drops the region reference taken at init time.
 *
 * NOTE(review): garbled extract — the function braces are missing from view.
 */
129 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
131 struct intel_memory_region *mem = obj->mm.region;
133 mutex_lock(&mem->objects.lock);
134 list_del(&obj->mm.region_link);
135 mutex_unlock(&mem->objects.lock);
/* Balances intel_memory_region_get() in i915_gem_object_init_memory_region(). */
137 intel_memory_region_put(mem);
/*
 * i915_gem_object_create_region - create a GEM object backed by a region.
 * @mem: memory region to allocate from
 * @size: requested size; rounded up to the region's minimum page size
 *
 * Validates flags and size, then delegates object construction to the
 * region's create_object hook.
 *
 * Return: the new object, or ERR_PTR() on failure (-ENODEV for a missing
 * region, -E2BIG for oversized requests).
 *
 * NOTE(review): garbled extract — this definition continues past the end of
 * the visible chunk (the final return and flags parameter line are not
 * shown); annotations cover only what is visible.
 */
140 struct drm_i915_gem_object *
141 i915_gem_object_create_region(struct intel_memory_region *mem,
142 resource_size_t size,
145 struct drm_i915_gem_object *obj;
148 * NB: Our use of resource_size_t for the size stems from using struct
149 * resource for the mem->region. We might need to revisit this in the
/* Reject any flag bits outside the allocation-flag mask. */
153 GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
156 return ERR_PTR(-ENODEV);
/* Region allocations are granular to min_page_size; round the request up. */
158 size = round_up(size, mem->min_page_size);
161 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
164 * XXX: There is a prevalence of the assumption that we fit the
165 * object's page count inside a 32bit _signed_ variable. Let's document
166 * this and catch if we ever need to fix it. In the meantime, if you do
167 * spot such a local variable, please consider fixing!
/* Page count must fit in a signed 32-bit int (see XXX above). */
170 if (size >> PAGE_SHIFT > INT_MAX)
171 return ERR_PTR(-E2BIG);
/* Byte size must also fit the (possibly narrower) obj->base.size field. */
173 if (overflows_type(size, obj->base.size))
174 return ERR_PTR(-E2BIG);
/* Region-specific constructor (e.g. lmem vs stolen) builds the object. */
176 obj = mem->ops->create_object(mem, size, flags);
178 trace_i915_gem_object_create(obj);