drivers/gpu/drm/i915/gt/intel_region_lmem.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "intel_region_lmem.h"

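/*
 * Stand in for the mappable BAR of real device-local memory: reserve a
 * matching node at the start of the GGTT, DMA-map the backing system memory
 * as one contiguous range, insert it page by page into the GGTT, and rewrite
 * mem->region to point at the remapped address.
 */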
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

	mem->remap_addr = dma_map_resource(i915->drm.dev,
					   mem->region.start,
					   mem->fake_mappable.size,
					   PCI_DMA_BIDIRECTIONAL,
					   DMA_ATTR_FORCE_CONTIGUOUS);
	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}

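/* Undo init_fake_lmem_bar(): drop the GGTT reservation and the DMA mapping. */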
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
	if (!drm_mm_node_allocated(&mem->fake_mappable))
		return;

	drm_mm_remove_node(&mem->fake_mappable);

	dma_unmap_resource(mem->i915->drm.dev,
			   mem->remap_addr,
			   mem->fake_mappable.size,
			   PCI_DMA_BIDIRECTIONAL,
			   DMA_ATTR_FORCE_CONTIGUOUS);
}

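/*
 * Release everything region_lmem_init() set up: the fake LMEM remapping (if
 * any), the CPU iomap and the buddy allocator state.
 */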
static void
region_lmem_release(struct intel_memory_region *mem)
{
	release_fake_lmem_bar(mem);
	io_mapping_fini(&mem->iomap);
	intel_memory_region_release_buddy(mem);
}

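/*
 * Bring the region into use: optionally remap the fake LMEM BAR, create a
 * write-combined CPU mapping of the I/O range and initialise the buddy
 * allocator that hands out blocks from the region.
 */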
static int
region_lmem_init(struct intel_memory_region *mem)
{
	int ret;

	if (mem->i915->params.fake_lmem_start) {
		ret = init_fake_lmem_bar(mem);
		GEM_BUG_ON(ret);
	}

	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				resource_size(&mem->region)))
		return -EIO;

	ret = intel_memory_region_init_buddy(mem);
	if (ret)
		io_mapping_fini(&mem->iomap);

	return ret;
}

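/* Ops shared by the real and fake LMEM regions. */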
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_lmem_object_init,
};

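/**
 * intel_gt_setup_fake_lmem - create an LMEM region backed by system memory
 * @gt: the GT for which to create the region
 *
 * With the i915.fake_lmem_start module parameter set, treat the system
 * memory starting at that address as if it were device-local memory,
 * reusing the size and I/O start of PCI BAR 2 (the mappable aperture).
 * Only valid when the GGTT itself has no mappable aperture.
 *
 * Returns: the new region, or an ERR_PTR() on failure.
 */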
struct intel_memory_region *
intel_gt_setup_fake_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t mappable_end;
	resource_size_t io_start;
	resource_size_t start;

	if (!HAS_LMEM(i915))
		return ERR_PTR(-ENODEV);

	if (!i915->params.fake_lmem_start)
		return ERR_PTR(-ENODEV);

	GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));

	/* Your mappable aperture belongs to me now! */
	mappable_end = pci_resource_len(pdev, 2);
	io_start = pci_resource_start(pdev, 2);
	start = i915->params.fake_lmem_start;

	mem = intel_memory_region_create(i915,
					 start,
					 mappable_end,
					 PAGE_SIZE,
					 io_start,
					 &intel_region_lmem_ops);
	if (!IS_ERR(mem)) {
		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
			 &mem->region);
		drm_info(&i915->drm,
			 "Intel graphics fake LMEM IO start: %llx\n",
			 (u64)mem->io_start);
		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
			 (u64)resource_size(&mem->region));
	}

	return mem;
}

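/*
 * On DG1 steppings A0 and B0 the first 1MiB of LMEM is kept out of the
 * allocator; report the range that needs reserving.
 */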
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
				     u64 *start, u32 *size)
{
	if (!IS_DG1_REVID(uncore->i915, DG1_REVID_A0, DG1_REVID_B0))
		return false;

	*start = 0;
	*size = SZ_1M;

	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
		*start, *start + *size);

	return true;
}

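/* Reserve any legacy low-memory range so it is never handed out by the region. */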
static int reserve_lowmem_region(struct intel_uncore *uncore,
				 struct intel_memory_region *mem)
{
	u64 reserve_start;
	u32 reserve_size;
	int ret;

	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
		return 0;

	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
	if (ret)
		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");

	return ret;
}

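/*
 * Probe real LMEM on discrete hardware: everything below GSMBASE belongs to
 * the driver (stolen memory lives above it) and is accessed through PCI
 * BAR 2.
 */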
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t io_start;
	resource_size_t lmem_size;
	int err;

	if (!IS_DGFX(i915))
		return ERR_PTR(-ENODEV);

	/* Stolen starts from GSMBASE on DG1 */
	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);

	io_start = pci_resource_start(pdev, 2);
	if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
		return ERR_PTR(-ENODEV);

	mem = intel_memory_region_create(i915,
					 0,
					 lmem_size,
					 I915_GTT_PAGE_SIZE_4K,
					 io_start,
					 &intel_region_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	err = reserve_lowmem_region(uncore, mem);
	if (err)
		goto err_region_put;

	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);

	return mem;

err_region_put:
	intel_memory_region_put(mem);
	return ERR_PTR(err);
}

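/**
 * intel_gt_setup_lmem - set up the device-local memory region for a GT
 * @gt: the GT to probe
 *
 * Returns: the LMEM region, or an ERR_PTR() on failure (-ENODEV on
 * integrated parts, where there is no local memory).
 */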
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}