// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"

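/*
 * Set up the "fake" local memory BAR used with the fake_lmem_start module
 * parameter: reserve the whole region in the GGTT, DMA-map the reserved
 * system memory range and insert it page by page into the GGTT, then
 * rewrite the region resource to point at the remapped address.
 */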
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
        struct drm_i915_private *i915 = mem->i915;
        struct i915_ggtt *ggtt = &i915->ggtt;
        unsigned long n;
        int ret;

        /* We want to 1:1 map the mappable aperture to our reserved region */

        mem->fake_mappable.start = 0;
        mem->fake_mappable.size = resource_size(&mem->region);
        mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

        ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
        if (ret)
                return ret;

        mem->remap_addr = dma_map_resource(i915->drm.dev,
                                           mem->region.start,
                                           mem->fake_mappable.size,
                                           DMA_BIDIRECTIONAL,
                                           DMA_ATTR_FORCE_CONTIGUOUS);
        if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
                drm_mm_remove_node(&mem->fake_mappable);
                return -EINVAL;
        }

        for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
                ggtt->vm.insert_page(&ggtt->vm,
                                     mem->remap_addr + (n << PAGE_SHIFT),
                                     n << PAGE_SHIFT,
                                     I915_CACHE_NONE, 0);
        }

        mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
                                                      mem->fake_mappable.size);

        return 0;
}

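/* Undo init_fake_lmem_bar(): drop the GGTT reservation and the DMA mapping. */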
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
        if (!drm_mm_node_allocated(&mem->fake_mappable))
                return;

        drm_mm_remove_node(&mem->fake_mappable);

        dma_unmap_resource(mem->i915->drm.dev,
                           mem->remap_addr,
                           mem->fake_mappable.size,
                           DMA_BIDIRECTIONAL,
                           DMA_ATTR_FORCE_CONTIGUOUS);
}

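/* Tear down the region in the reverse order of region_lmem_init(). */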
static void
region_lmem_release(struct intel_memory_region *mem)
{
        intel_region_ttm_fini(mem);
        io_mapping_fini(&mem->iomap);
        release_fake_lmem_bar(mem);
}

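/*
 * Bring up an LMEM region: optionally set up the fake LMEM BAR, create a
 * WC iomap of the CPU-visible I/O range and hand the region over to TTM
 * for allocation.
 */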
static int
region_lmem_init(struct intel_memory_region *mem)
{
        int ret;

        if (mem->i915->params.fake_lmem_start) {
                ret = init_fake_lmem_bar(mem);
                GEM_BUG_ON(ret);
        }

        if (!io_mapping_init_wc(&mem->iomap,
                                mem->io_start,
                                resource_size(&mem->region))) {
                ret = -EIO;
                goto out_no_io;
        }

        ret = intel_region_ttm_init(mem);
        if (ret)
                goto out_no_buddy;

        return 0;

out_no_buddy:
        io_mapping_fini(&mem->iomap);
out_no_io:
        release_fake_lmem_bar(mem);

        return ret;
}

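/* Ops shared by the real and fake LMEM regions; objects are backed by TTM. */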
static const struct intel_memory_region_ops intel_region_lmem_ops = {
        .init = region_lmem_init,
        .release = region_lmem_release,
        .init_object = __i915_gem_ttm_object_init,
};

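/*
 * Create a fake local-memory region backed by the system memory range given
 * by the fake_lmem_start module parameter, repurposing the mappable
 * aperture's PCI BAR as its I/O window. Returns an ERR_PTR() if fake LMEM
 * is not requested or not possible on this device.
 */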
struct intel_memory_region *
intel_gt_setup_fake_lmem(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        struct intel_memory_region *mem;
        resource_size_t mappable_end;
        resource_size_t io_start;
        resource_size_t start;

        if (!HAS_LMEM(i915))
                return ERR_PTR(-ENODEV);

        if (!i915->params.fake_lmem_start)
                return ERR_PTR(-ENODEV);

        GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));

        /* Your mappable aperture belongs to me now! */
        mappable_end = pci_resource_len(pdev, 2);
        io_start = pci_resource_start(pdev, 2);
        start = i915->params.fake_lmem_start;

        mem = intel_memory_region_create(i915,
                                         start,
                                         mappable_end,
                                         PAGE_SIZE,
                                         io_start,
                                         INTEL_MEMORY_LOCAL,
                                         0,
                                         &intel_region_lmem_ops);
        if (!IS_ERR(mem)) {
                drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
                         &mem->region);
                drm_info(&i915->drm,
                         "Intel graphics fake LMEM IO start: %llx\n",
                         (u64)mem->io_start);
                drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
                         (u64)resource_size(&mem->region));
        }

        return mem;
}

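/*
 * On early DG1 steppings (A0 up to, but not including, C0) the first 1MiB
 * of LMEM must be kept out of the allocator; report that range here.
 */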
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
                                     u64 *start, u32 *size)
{
        if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0))
                return false;

        *start = 0;
        *size = SZ_1M;

        drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
                *start, *start + *size);

        return true;
}

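/* Reserve the legacy low-memory range, if this platform needs it. */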
static int reserve_lowmem_region(struct intel_uncore *uncore,
                                 struct intel_memory_region *mem)
{
        u64 reserve_start;
        u32 reserve_size;
        int ret;

        if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
                return 0;

        ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
        if (ret)
                drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");

        return ret;
}

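/*
 * Probe the real LMEM region: its size is everything below GSMBASE (where
 * stolen memory begins on DG1), and its CPU-visible I/O window is PCI BAR 2.
 */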
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        struct intel_memory_region *mem;
        resource_size_t io_start;
        resource_size_t lmem_size;
        int err;

        if (!IS_DGFX(i915))
                return ERR_PTR(-ENODEV);

        /* Stolen starts from GSMBASE on DG1 */
        lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);

        io_start = pci_resource_start(pdev, 2);
        if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
                return ERR_PTR(-ENODEV);

        mem = intel_memory_region_create(i915,
                                         0,
                                         lmem_size,
                                         I915_GTT_PAGE_SIZE_4K,
                                         io_start,
                                         INTEL_MEMORY_LOCAL,
                                         0,
                                         &intel_region_lmem_ops);
        if (IS_ERR(mem))
                return mem;

        err = reserve_lowmem_region(uncore, mem);
        if (err)
                goto err_region_put;

        drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
        drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
                &mem->io_start);
        drm_info(&i915->drm, "Local memory available: %pa\n",
                 &lmem_size);

        return mem;

err_region_put:
        intel_memory_region_put(mem);
        return ERR_PTR(err);
}

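/* Thin wrapper so callers outside this file can set up the LMEM region for a GT. */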
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
        return setup_lmem(gt);
}