/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		/* retry once after draining any deferred frees */
		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);
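
/*
 * ion_handle_destroy() drops any kernel mappings still held via the
 * handle, unlinks it from the client's idr and handle rbtree, and then
 * drops the handle's reference on the underlying buffer. It is only
 * reached via kref_put() on the handle refcount.
 */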

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
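
/*
 * Illustrative sketch of in-kernel usage (the client and heap id here
 * are hypothetical; real callers must pick a heap id that exists on
 * their platform):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, 1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */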

void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
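
/*
 * Every successful ion_map_kernel() call must be balanced by an
 * ion_unmap_kernel() on the same handle: the mapping is refcounted
 * through handle->kmap_cnt and buffer->kmap_cnt and is only torn down
 * once both counts drop to zero.
 */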

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;

static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
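
/*
 * Each dma-buf attachment below gets its own copy of the buffer's
 * sg_table, so the per-device DMA addresses that dma_map_sg() writes
 * into one attachment's table can never clobber another attachment's.
 */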

static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	free_duped_table(a->table);
	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	kfree(a);
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		/*
		 * Do not free the duped table on failure: it is still
		 * owned by the attachment and is freed in
		 * ion_dma_buf_detach(), so freeing it here would lead
		 * to a double free.
		 */
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
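
/*
 * Sketch of the export path (illustrative only): a driver hands a
 * buffer to userspace by turning its handle into a file descriptor,
 * e.g.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *
 * The fd owns a dma_buf reference; userspace can mmap() it or pass it
 * to other drivers, and the buffer stays alive until every reference
 * has been dropped.
 */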

int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ion_ioctl,
#endif
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);

		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   kref_read(&buffer->ref));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
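
/*
 * The "<heap>_shrink" debugfs file created in ion_device_add_heap()
 * below is backed by the pair above: reading it reports the current
 * pool size via debug_shrink_get(), and writing N scans up to N
 * objects via debug_shrink_set(); writing 0 drains the whole pool.
 */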

void ion_device_add_heap(struct ion_heap *heap)
{
	struct dentry *debug_file;
	struct ion_device *dev = internal_dev;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);