/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
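
/*
 * Illustrative usage sketch, not part of the driver: how a caller might
 * pair the insert/remove helpers above to borrow a range of stolen memory.
 * The function name and the 64 KiB size are assumptions made purely for
 * illustration; the helpers are the ones defined above.
 */
#if 0
static int example_borrow_stolen(struct drm_i915_private *i915)
{
	struct drm_mm_node node = {};
	int err;

	/* Request 64 KiB anywhere in stolen, 4 KiB aligned. */
	err = i915_gem_stolen_insert_node(i915, &node, 64 * 1024, 4096);
	if (err)
		return err; /* e.g. -ENODEV if stolen was never initialised */

	/* ... use the range [node.start, node.start + node.size) ... */

	i915_gem_stolen_remove_node(i915, &node);
	return 0;
}
#endif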

static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}
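
/*
 * Illustrative usage sketch, not part of the driver: a consumer such as
 * fbdev or FBC elsewhere in i915 might allocate a contiguous buffer from
 * stolen like this. The function name and the 1 MiB size are assumptions
 * for illustration; i915_gem_object_create_stolen() is defined just above.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc_from_stolen(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(i915, 1024 * 1024);
	if (IS_ERR(obj))
		return obj; /* e.g. -ENODEV or -ENOMEM from the region code */

	/* Stolen has no struct page backing; access it through the GGTT. */
	return obj;
}
#endif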

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}
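
/*
 * Illustrative usage sketch, not part of the driver: wrapping a
 * BIOS-programmed framebuffer that already lives at a known offset inside
 * stolen. The offset and size here are made-up example values; in the real
 * driver the display code derives them from the hardware plane registers.
 */
#if 0
static struct drm_i915_gem_object *
example_wrap_bios_fb(struct drm_i915_private *i915)
{
	/* Both values must be page-aligned, as the checks above enforce. */
	return i915_gem_object_create_stolen_for_preallocated(i915,
							      0x20000,
							      8 * 1024 * 1024);
}
#endif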