// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
6 #include "gem/i915_gem_ioctls.h"
7 #include "gem/i915_gem_lmem.h"
8 #include "gem/i915_gem_region.h"
11 #include "i915_trace.h"
12 #include "i915_user_extensions.h"
/*
 * Return the largest min_page_size across all of the object's candidate
 * placement regions: the object must be able to live in any of them, so
 * its size/alignment must satisfy the strictest (largest) requirement.
 */
static u32 object_max_page_size(struct drm_i915_gem_object *obj)
{
	u32 max_page_size = 0;
	int i;

	for (i = 0; i < obj->mm.n_placements; i++) {
		struct intel_memory_region *mr = obj->mm.placements[i];

		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
	}

	/* Placements must already be set; zero means none were provided. */
	GEM_BUG_ON(!max_page_size);
	return max_page_size;
}
/*
 * Record the candidate placement regions on @obj.
 *
 * For a single region we point at the device's own region table entry so no
 * separate array allocation needs to live alongside the object; for multiple
 * regions the caller-provided array is referenced directly (the caller/object
 * cleanup paths are responsible for freeing it — see set_placements() and
 * i915_gem_create_ext_ioctl()).
 */
static void object_set_placements(struct drm_i915_gem_object *obj,
				  struct intel_memory_region **placements,
				  unsigned int n_placements)
{
	GEM_BUG_ON(!n_placements);

	/*
	 * For the common case of one memory region, skip storing an
	 * allocated array and just point at the region directly.
	 */
	if (n_placements == 1) {
		struct intel_memory_region *mr = placements[0];
		struct drm_i915_private *i915 = mr->i915;

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		obj->mm.placements = placements;
		obj->mm.n_placements = n_placements;
	}
}
/*
 * Create a userspace handle for @obj and report the final size back.
 *
 * Consumes the creation reference on @obj regardless of outcome: on success
 * the handle holds the remaining reference; on failure the object is
 * released. Writes the (possibly rounded-up) size and new handle into
 * @size_p/@handle_p on success.
 */
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 size = obj->base.size;
	int ret;

	ret = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*size_p = size;
	return 0;
}
/*
 * Size-check @size against the object's placements and back the object with
 * memory from its first (preferred) region. Returns 0 on success or a
 * negative errno (-EINVAL for a zero/overflowed size, -E2BIG for an object
 * too large for the ABI, or whatever the region's init_object() reports).
 */
static int
i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
{
	struct intel_memory_region *mr = obj->mm.placements[0];
	unsigned int flags;
	int ret;

	/* Every placement must be able to map the object. */
	size = round_up(size, object_max_page_size(obj));
	if (size == 0)
		return -EINVAL;

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return -E2BIG;

	/*
	 * For now resort to CPU based clearing for device local-memory, in the
	 * near future this will use the blitter engine for accelerated, GPU
	 * based clearing.
	 */
	flags = 0;
	if (mr->type == INTEL_MEMORY_LOCAL)
		flags = I915_BO_ALLOC_CPU_CLEAR;

	ret = mr->ops->init_object(mr, obj, size, flags);
	if (ret)
		return ret;

	/* init_object() must honour the rounded-up size exactly. */
	GEM_BUG_ON(size != obj->base.size);

	trace_i915_gem_object_create(obj);
	return 0;
}
/*
 * Implement the DRM "dumb buffer" allocation for scanout: work out a
 * hardware-compatible pitch/size from width/height/bpp, then allocate the
 * backing object (local memory when the device has LMEM, system otherwise)
 * and publish a handle to userspace.
 */
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;
	int ret;

	/* Only byte-sized pixel formats are supported for dumb buffers. */
	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	/* Pitch smaller than width means the multiply above overflowed. */
	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	/* Scanout buffers must live in local memory when the GPU has it. */
	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	obj = i915_gem_object_alloc();
	if (!obj)
		return -ENOMEM;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);
	object_set_placements(obj, &mr, 1);

	ret = i915_gem_setup(obj, args->size);
	if (ret)
		goto object_free;

	/* i915_gem_publish() consumes our reference on obj. */
	return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
	i915_gem_object_free(obj);
	return ret;
}
/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
/* Legacy GEM_CREATE: always allocates from system memory. */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	int ret;

	/* Reap any freed objects first to keep memory pressure down. */
	i915_gem_flush_free_objects(i915);

	obj = i915_gem_object_alloc();
	if (!obj)
		return -ENOMEM;

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
	object_set_placements(obj, &mr, 1);

	ret = i915_gem_setup(obj, args->size);
	if (ret)
		goto object_free;

	/* i915_gem_publish() consumes our reference on obj. */
	return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
	i915_gem_object_free(obj);
	return ret;
}
/*
 * Per-call state threaded through the GEM_CREATE_EXT user extensions: the
 * device and the not-yet-initialized ("vanilla") object being configured.
 */
struct create_ext {
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *vanilla_object;
};
/*
 * Format a human-readable list of placement regions into @buf (for debug
 * messages). Stops quietly when @buf runs out of space.
 */
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int i;

	buf[0] = '\0';

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];
		int r;

		r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
			     mr->name, mr->type, mr->instance);
		if (r >= size)
			return;

		buf += r;
		size -= r;
	}
}
/*
 * Validate and apply the I915_GEM_CREATE_EXT_MEMORY_REGIONS extension:
 * copy the userspace region list, check each entry exists on this device,
 * reject duplicates, and install the resulting placement list on the
 * vanilla object. Returns 0 on success or a negative errno.
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct drm_i915_gem_object *obj = ext_data->vanilla_object;
	struct intel_memory_region **placements;
	u32 mask;
	int i, ret = 0;

	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	placements = kmalloc_array(args->num_regions,
				   sizeof(struct intel_memory_region *),
				   GFP_KERNEL);
	if (!placements)
		return -ENOMEM;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region))) {
			ret = -EFAULT;
			goto out_free;
		}

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		/* Private regions are internal-only, never user-selectable. */
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		/* Track seen regions in a bitmask to reject duplicates. */
		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	/* The extension may only be supplied once per create call. */
	if (obj->mm.placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	object_set_placements(obj, placements, args->num_regions);
	/* Single region: object points into i915->mm.regions, array unused. */
	if (args->num_regions == 1)
		kfree(placements);

	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (obj->mm.placements) {
			repr_placements(buf,
					sizeof(buf),
					obj->mm.placements,
					obj->mm.n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

out_free:
	kfree(placements);
	return ret;
}
/*
 * i915_user_extension_fn thunk: copy the memory-regions extension struct
 * from userspace and hand it to set_placements(). @data is the create_ext
 * context set up by i915_gem_create_ext_ioctl().
 */
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	return set_placements(&ext, data);
}
/* Dispatch table for GEM_CREATE_EXT user extensions, indexed by extension id. */
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
};
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
/*
 * Extensible GEM create: processes the user-extension chain (currently only
 * memory-region placements) before allocating; falls back to a single
 * system-memory placement when no extension supplied one.
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct intel_memory_region **placements_ext;
	struct drm_i915_gem_object *obj;
	int ret;

	/* No flags are defined for this ioctl yet. */
	if (args->flags)
		return -EINVAL;

	i915_gem_flush_free_objects(i915);

	obj = i915_gem_object_alloc();
	if (!obj)
		return -ENOMEM;

	ext_data.vanilla_object = obj;
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	/*
	 * Snapshot the (possibly extension-installed) placements array so we
	 * can free it on the error path below.
	 */
	placements_ext = obj->mm.placements;
	if (ret)
		goto object_free;

	if (!placements_ext) {
		struct intel_memory_region *mr =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

		object_set_placements(obj, &mr, 1);
	}

	ret = i915_gem_setup(obj, args->size);
	if (ret)
		goto object_free;

	/* i915_gem_publish() consumes our reference on obj. */
	return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
	/* Only a multi-region list is a separate kmalloc'ed array. */
	if (obj->mm.n_placements > 1)
		kfree(placements_ext);
	i915_gem_object_free(obj);
	return ret;
}