/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
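
/**
 * amdgpu_gem_object_free - free callback for GEM objects
 *
 * @gobj: GEM object to free
 *
 * Tears down any PRIME import attachment, unregisters the MMU
 * notifier and drops the final buffer object reference.
 */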
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
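
/**
 * amdgpu_gem_object_create - allocate a buffer object wrapped in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement (VRAM, GTT, CPU, GDS, GWS or OA)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel internal allocation
 * @obj: resulting GEM object
 *
 * Falls back from VRAM to GTT when the first allocation attempt fails.
 */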
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}
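
/**
 * amdgpu_gem_init - initialize per-device GEM state
 *
 * @adev: amdgpu device
 *
 * Initializes the per-device list of GEM objects.
 */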
int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}
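
/**
 * amdgpu_gem_fini - tear down per-device GEM state
 *
 * @adev: amdgpu device
 *
 * Forcefully deletes any buffer objects still on the GEM object list.
 */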
void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * the open ioctl case.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);

	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
}
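
/**
 * amdgpu_gem_handle_lockup - translate a GPU lockup into a reset attempt
 *
 * @adev: amdgpu device
 * @r: error code returned by the previous operation
 *
 * On -EDEADLK try a GPU reset and, if it succeeds, ask userspace to
 * retry with -EAGAIN.
 */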
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
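
/*
 * GEM ioctls.
 */
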
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	down_read(&adev->exclusive_lock);
	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

error_unlock:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
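
/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: drm file
 *
 * Validates the flags, binds the user pages to a CPU domain buffer
 * object and optionally registers an MMU notifier for it.
 */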
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

		/* if we want to write to it we must require anonymous
		 * memory and install a MMU notifier
		 */
		return -EACCES;
	}

	down_read(&adev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}
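
/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a GEM handle
 *
 * @filp: drm file
 * @dev: drm device
 * @handle: GEM handle
 * @offset_p: returned mmap offset
 *
 * Userptr and no-CPU-access objects cannot be mapped and yield -EPERM.
 */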
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
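
/**
 * amdgpu_gem_mmap_ioctl - AMDGPU_GEM_MMAP ioctl
 *
 * @dev: drm device
 * @data: ioctl arguments (union drm_amdgpu_gem_mmap)
 * @filp: drm file
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap().
 */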
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
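
/**
 * amdgpu_gem_wait_idle_ioctl - wait for a buffer object to become idle
 *
 * @dev: drm device
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: drm file
 *
 * Waits on the reservation object of the buffer, honoring the
 * absolute timeout converted by amdgpu_gem_timeout().
 */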
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
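
/**
 * amdgpu_gem_metadata_ioctl - get or set buffer tiling flags and metadata
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: drm file
 */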
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
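
/**
 * amdgpu_gem_va_ioctl - map or unmap a buffer object in a VM
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: drm file
 *
 * Validates the requested address and flags, then maps or unmaps the
 * buffer in the per-file VM and, unless delayed, updates the VM.
 */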
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		amdgpu_bo_unreserve(rbo);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}

	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
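
/**
 * amdgpu_gem_op_ioctl - query creation info or change the placement of a buffer
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: drm file
 */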
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
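
/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: drm file
 * @dev: drm device
 * @args: width, height and bpp come from userspace; pitch, size and
 *        handle are filled in
 *
 * Allocates a page-aligned VRAM buffer and returns a handle to it.
 */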
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     0, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
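
/*
 * Debugfs support: dump the list of all GEM objects with their
 * size, placement and owning pid.
 */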
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}