d94914a8673715822e50bccbcec8043ea4fe7201
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / gem / i915_gem_region.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
8 #include "i915_drv.h"
9
10 void
11 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
12                                 struct sg_table *pages)
13 {
14         __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
15
16         obj->mm.dirty = false;
17         sg_free_table(pages);
18         kfree(pages);
19 }
20
21 int
22 i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
23 {
24         struct intel_memory_region *mem = obj->mm.region;
25         struct list_head *blocks = &obj->mm.blocks;
26         resource_size_t size = obj->base.size;
27         resource_size_t prev_end;
28         struct i915_buddy_block *block;
29         unsigned int flags;
30         struct sg_table *st;
31         struct scatterlist *sg;
32         unsigned int sg_page_sizes;
33         int ret;
34
35         st = kmalloc(sizeof(*st), GFP_KERNEL);
36         if (!st)
37                 return -ENOMEM;
38
39         if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
40                 kfree(st);
41                 return -ENOMEM;
42         }
43
44         flags = I915_ALLOC_MIN_PAGE_SIZE;
45         if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
46                 flags |= I915_ALLOC_CONTIGUOUS;
47
48         ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
49         if (ret)
50                 goto err_free_sg;
51
52         GEM_BUG_ON(list_empty(blocks));
53
54         sg = st->sgl;
55         st->nents = 0;
56         sg_page_sizes = 0;
57         prev_end = (resource_size_t)-1;
58
59         list_for_each_entry(block, blocks, link) {
60                 u64 block_size, offset;
61
62                 block_size = min_t(u64, size,
63                                    i915_buddy_block_size(&mem->mm, block));
64                 offset = i915_buddy_block_offset(block);
65
66                 GEM_BUG_ON(overflows_type(block_size, sg->length));
67
68                 if (offset != prev_end ||
69                     add_overflows_t(typeof(sg->length), sg->length, block_size)) {
70                         if (st->nents) {
71                                 sg_page_sizes |= sg->length;
72                                 sg = __sg_next(sg);
73                         }
74
75                         sg_dma_address(sg) = mem->region.start + offset;
76                         sg_dma_len(sg) = block_size;
77
78                         sg->length = block_size;
79
80                         st->nents++;
81                 } else {
82                         sg->length += block_size;
83                         sg_dma_len(sg) += block_size;
84                 }
85
86                 prev_end = offset + block_size;
87         };
88
89         sg_page_sizes |= sg->length;
90         sg_mark_end(sg);
91         i915_sg_trim(st);
92
93         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
94
95         return 0;
96
97 err_free_sg:
98         sg_free_table(st);
99         kfree(st);
100         return ret;
101 }
102
103 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
104                                         struct intel_memory_region *mem,
105                                         unsigned long flags)
106 {
107         INIT_LIST_HEAD(&obj->mm.blocks);
108         obj->mm.region = intel_memory_region_get(mem);
109         obj->flags |= flags;
110 }
111
/* Drop the region reference taken in i915_gem_object_init_memory_region(). */
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	intel_memory_region_put(obj->mm.region);
}
116
117 struct drm_i915_gem_object *
118 i915_gem_object_create_region(struct intel_memory_region *mem,
119                               resource_size_t size,
120                               unsigned int flags)
121 {
122         struct drm_i915_gem_object *obj;
123
124         /*
125          * NB: Our use of resource_size_t for the size stems from using struct
126          * resource for the mem->region. We might need to revisit this in the
127          * future.
128          */
129
130         GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
131
132         if (!mem)
133                 return ERR_PTR(-ENODEV);
134
135         size = round_up(size, mem->min_page_size);
136
137         GEM_BUG_ON(!size);
138         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
139
140         /*
141          * XXX: There is a prevalence of the assumption that we fit the
142          * object's page count inside a 32bit _signed_ variable. Let's document
143          * this and catch if we ever need to fix it. In the meantime, if you do
144          * spot such a local variable, please consider fixing!
145          */
146
147         if (size >> PAGE_SHIFT > INT_MAX)
148                 return ERR_PTR(-E2BIG);
149
150         if (overflows_type(size, obj->base.size))
151                 return ERR_PTR(-E2BIG);
152
153         return mem->ops->create_object(mem, size, flags);
154 }