/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

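/*
 * Allocation-flag shorthands for the backing-store loop below: QUIET marks
 * the opportunistic high-order attempts, which should fail fast and without
 * warnings, while MAYFAIL is used for the final order-0 attempt, which may
 * retry harder before reporting failure.
 */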
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

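/*
 * Free every (potentially high-order) page referenced by the sg_table and
 * then the table itself; used both on the error path and from put_pages.
 */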
static void internal_free_pages(struct sg_table *st)
{
        struct scatterlist *sg;

        for (sg = st->sgl; sg; sg = __sg_next(sg)) {
                if (sg_page(sg))
                        __free_pages(sg_page(sg), get_order(sg->length));
        }

        sg_free_table(st);
        kfree(st);
}

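/*
 * Populate the object from the page allocator, preferring the largest
 * contiguous chunks available and falling back to ever smaller orders
 * (ultimately order-0) under memory pressure, so the resulting sg_table
 * may mix segment sizes.
 */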
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        unsigned int npages;
        int max_order;
        gfp_t gfp;

        max_order = MAX_ORDER;
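        /*
         * With swiotlb active, dma mapping may bounce through buffers that
         * cannot exceed the swiotlb segment size, so clamp the largest
         * allocation order we will attempt accordingly.
         */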
#ifdef CONFIG_SWIOTLB
        if (is_swiotlb_active(obj->base.dev->dev)) {
                unsigned int max_segment;

                max_segment = swiotlb_max_segment();
                if (max_segment) {
                        max_segment = max_t(unsigned int, max_segment,
                                            PAGE_SIZE) >> PAGE_SHIFT;
                        max_order = min(max_order, ilog2(max_segment));
                }
        }
#endif

        gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                gfp &= ~__GFP_HIGHMEM;
                gfp |= __GFP_DMA32;
        }

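        /*
         * Retry point: if dma mapping of a mixed-order table fails below,
         * we come back here to rebuild it from order-0 segments only.
         */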
create_st:
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        npages = obj->base.size / PAGE_SIZE;
        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;

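        /*
         * Fill the table front to back. Each segment starts at the largest
         * order that still fits the remaining page count and steps down on
         * allocation failure; a failure also caps every later segment via
         * max_order.
         */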
        do {
                int order = min(fls(npages) - 1, max_order);
                struct page *page;

                do {
                        page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
                                           order);
                        if (page)
                                break;
                        if (!order--)
                                goto err;

                        /* Limit subsequent allocations as well */
                        max_order = order;
                } while (1);

                sg_set_page(sg, page, PAGE_SIZE << order, 0);
                sg_page_sizes |= PAGE_SIZE << order;
                st->nents++;

                npages -= 1 << order;
                if (!npages) {
                        sg_mark_end(sg);
                        break;
                }

                sg = __sg_next(sg);
        } while (1);

        if (i915_gem_gtt_prepare_pages(obj, st)) {
                /* Failed to dma-map; try again with single page sg segments */
                if (get_order(st->sgl->length)) {
                        internal_free_pages(st);
                        max_order = 0;
                        goto create_st;
                }
                goto err;
        }

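        /* Publish the completed table, recording the mix of page sizes used. */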
        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

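        /*
         * On allocation failure, the current segment was never populated;
         * clear and terminate the table there so internal_free_pages()
         * only walks entries we actually own.
         */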
err:
        sg_set_page(sg, NULL, 0, 0);
        sg_mark_end(sg);
        internal_free_pages(st);

        return -ENOMEM;
}

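/*
 * Unmap the pages from the GTT and return them to the page allocator. The
 * contents are volatile, so there is nothing to write back first.
 */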
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
                                               struct sg_table *pages)
{
        i915_gem_gtt_finish_pages(obj, pages);
        internal_free_pages(pages);

        obj->mm.dirty = false;
}

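/*
 * Internal objects are shrinkable: once unpinned, their volatile pages may
 * be reaped by the shrinker at any time.
 */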
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
        .name = "i915_gem_object_internal",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = i915_gem_object_get_pages_internal,
        .put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 *
 * Returns: the new object on success, or an ERR_PTR on failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
                                phys_addr_t size)
{
        static struct lock_class_key lock_class;
        struct drm_i915_gem_object *obj;
        unsigned int cache_level;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                return ERR_PTR(-ENOMEM);

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
        obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

        /*
         * Mark the object as volatile, such that the pages are marked as
         * dontneed whilst they are still pinned. As soon as they are unpinned
         * they are allowed to be reaped by the shrinker, and the caller is
         * expected to repopulate - the contents of this object are only valid
         * whilst active and pinned.
         */
        i915_gem_object_set_volatile(obj);

        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;

        cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        return obj;
}
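
/*
 * Example usage (a minimal sketch, not part of this file; the SZ_64K size
 * and the surrounding error handling are illustrative only): a typical
 * caller creates the object, pins its pages while the hardware uses them,
 * and drops the pin and the reference when done, accepting that the
 * contents are lost once the shrinker reaps the unpinned pages:
 *
 *      obj = i915_gem_object_create_internal(i915, SZ_64K);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      err = i915_gem_object_pin_pages_unlocked(obj);
 *      if (err) {
 *              i915_gem_object_put(obj);
 *              return err;
 *      }
 *
 *      ... use the pinned pages ...
 *
 *      i915_gem_object_unpin_pages(obj);
 *      i915_gem_object_put(obj);
 */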