staging: android: ion: Rework heap registration/enumeration
[linux-2.6-microblaze.git] / drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39 #include <linux/sched/task.h>
40
41 #include "ion.h"
42
43 static struct ion_device *internal_dev;
44 static int heap_id;
45
46 bool ion_buffer_cached(struct ion_buffer *buffer)
47 {
48         return !!(buffer->flags & ION_FLAG_CACHED);
49 }
50
51 /* this function should only be called while dev->lock is held */
52 static void ion_buffer_add(struct ion_device *dev,
53                            struct ion_buffer *buffer)
54 {
55         struct rb_node **p = &dev->buffers.rb_node;
56         struct rb_node *parent = NULL;
57         struct ion_buffer *entry;
58
59         while (*p) {
60                 parent = *p;
61                 entry = rb_entry(parent, struct ion_buffer, node);
62
63                 if (buffer < entry) {
64                         p = &(*p)->rb_left;
65                 } else if (buffer > entry) {
66                         p = &(*p)->rb_right;
67                 } else {
68                         pr_err("%s: buffer already found.", __func__);
69                         BUG();
70                 }
71         }
72
73         rb_link_node(&buffer->node, parent, p);
74         rb_insert_color(&buffer->node, &dev->buffers);
75 }
76
77 /* this function should only be called while dev->lock is held */
78 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
79                                             struct ion_device *dev,
80                                             unsigned long len,
81                                             unsigned long flags)
82 {
83         struct ion_buffer *buffer;
84         struct sg_table *table;
85         int ret;
86
87         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
88         if (!buffer)
89                 return ERR_PTR(-ENOMEM);
90
91         buffer->heap = heap;
92         buffer->flags = flags;
93         kref_init(&buffer->ref);
94
95         ret = heap->ops->allocate(heap, buffer, len, flags);
96
97         if (ret) {
98                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
99                         goto err2;
100
101                 ion_heap_freelist_drain(heap, 0);
102                 ret = heap->ops->allocate(heap, buffer, len, flags);
103                 if (ret)
104                         goto err2;
105         }
106
107         if (buffer->sg_table == NULL) {
108                 WARN_ONCE(1, "This heap needs to set the sgtable");
109                 ret = -EINVAL;
110                 goto err1;
111         }
112
113         table = buffer->sg_table;
114         buffer->dev = dev;
115         buffer->size = len;
116
119         INIT_LIST_HEAD(&buffer->vmas);
120         INIT_LIST_HEAD(&buffer->attachments);
121         mutex_init(&buffer->lock);
122         mutex_lock(&dev->buffer_lock);
123         ion_buffer_add(dev, buffer);
124         mutex_unlock(&dev->buffer_lock);
125         return buffer;
126
127 err1:
128         heap->ops->free(buffer);
129 err2:
130         kfree(buffer);
131         return ERR_PTR(ret);
132 }
133
134 void ion_buffer_destroy(struct ion_buffer *buffer)
135 {
136         if (WARN_ON(buffer->kmap_cnt > 0))
137                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
138         buffer->heap->ops->free(buffer);
139         vfree(buffer->pages);
140         kfree(buffer);
141 }
142
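/*
 * Buffer teardown note: when the last reference to a buffer is dropped it
 * is either destroyed immediately or, for heaps created with
 * ION_HEAP_FLAG_DEFER_FREE, queued on the heap's freelist and released
 * later (see ion_heap_freelist_add() and ion_heap_freelist_drain()).
 */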
143 static void _ion_buffer_destroy(struct kref *kref)
144 {
145         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
146         struct ion_heap *heap = buffer->heap;
147         struct ion_device *dev = buffer->dev;
148
149         mutex_lock(&dev->buffer_lock);
150         rb_erase(&buffer->node, &dev->buffers);
151         mutex_unlock(&dev->buffer_lock);
152
153         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
154                 ion_heap_freelist_add(heap, buffer);
155         else
156                 ion_buffer_destroy(buffer);
157 }
158
159 static void ion_buffer_get(struct ion_buffer *buffer)
160 {
161         kref_get(&buffer->ref);
162 }
163
164 static int ion_buffer_put(struct ion_buffer *buffer)
165 {
166         return kref_put(&buffer->ref, _ion_buffer_destroy);
167 }
168
169 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
170 {
171         mutex_lock(&buffer->lock);
172         buffer->handle_count++;
173         mutex_unlock(&buffer->lock);
174 }
175
176 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
177 {
178         /*
179          * When a buffer is removed from a handle and is not held by any
180          * other handle, copy the task comm and the pid of the process it
181          * is being removed from into the buffer.  At that point there is
182          * no way to track which processes are using this buffer; it only
183          * exists as a dma_buf file descriptor.  The task comm and pid
184          * provide a debug hint as to where this fd lives in the
185          * system.
186          */
187         mutex_lock(&buffer->lock);
188         buffer->handle_count--;
189         BUG_ON(buffer->handle_count < 0);
190         if (!buffer->handle_count) {
191                 struct task_struct *task;
192
193                 task = current->group_leader;
194                 get_task_comm(buffer->task_comm, task);
195                 buffer->pid = task_pid_nr(task);
196         }
197         mutex_unlock(&buffer->lock);
198 }
199
200 static struct ion_handle *ion_handle_create(struct ion_client *client,
201                                             struct ion_buffer *buffer)
202 {
203         struct ion_handle *handle;
204
205         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
206         if (!handle)
207                 return ERR_PTR(-ENOMEM);
208         kref_init(&handle->ref);
209         RB_CLEAR_NODE(&handle->node);
210         handle->client = client;
211         ion_buffer_get(buffer);
212         ion_buffer_add_to_handle(buffer);
213         handle->buffer = buffer;
214
215         return handle;
216 }
217
218 static void ion_handle_kmap_put(struct ion_handle *);
219
220 static void ion_handle_destroy(struct kref *kref)
221 {
222         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
223         struct ion_client *client = handle->client;
224         struct ion_buffer *buffer = handle->buffer;
225
226         mutex_lock(&buffer->lock);
227         while (handle->kmap_cnt)
228                 ion_handle_kmap_put(handle);
229         mutex_unlock(&buffer->lock);
230
231         idr_remove(&client->idr, handle->id);
232         if (!RB_EMPTY_NODE(&handle->node))
233                 rb_erase(&handle->node, &client->handles);
234
235         ion_buffer_remove_from_handle(buffer);
236         ion_buffer_put(buffer);
237
238         kfree(handle);
239 }
240
241 static void ion_handle_get(struct ion_handle *handle)
242 {
243         kref_get(&handle->ref);
244 }
245
246 int ion_handle_put_nolock(struct ion_handle *handle)
247 {
248         return kref_put(&handle->ref, ion_handle_destroy);
249 }
250
251 int ion_handle_put(struct ion_handle *handle)
252 {
253         struct ion_client *client = handle->client;
254         int ret;
255
256         mutex_lock(&client->lock);
257         ret = ion_handle_put_nolock(handle);
258         mutex_unlock(&client->lock);
259
260         return ret;
261 }
262
263 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
264                                                int id)
265 {
266         struct ion_handle *handle;
267
268         handle = idr_find(&client->idr, id);
269         if (handle)
270                 ion_handle_get(handle);
271
272         return handle ? handle : ERR_PTR(-EINVAL);
273 }
274
275 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
276                                                int id)
277 {
278         struct ion_handle *handle;
279
280         mutex_lock(&client->lock);
281         handle = ion_handle_get_by_id_nolock(client, id);
282         mutex_unlock(&client->lock);
283
284         return handle;
285 }
286
287 static bool ion_handle_validate(struct ion_client *client,
288                                 struct ion_handle *handle)
289 {
290         WARN_ON(!mutex_is_locked(&client->lock));
291         return idr_find(&client->idr, handle->id) == handle;
292 }
293
294 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
295 {
296         int id;
297         struct rb_node **p = &client->handles.rb_node;
298         struct rb_node *parent = NULL;
299         struct ion_handle *entry;
300
301         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
302         if (id < 0)
303                 return id;
304
305         handle->id = id;
306
307         while (*p) {
308                 parent = *p;
309                 entry = rb_entry(parent, struct ion_handle, node);
310
311                 if (handle->buffer < entry->buffer)
312                         p = &(*p)->rb_left;
313                 else if (handle->buffer > entry->buffer)
314                         p = &(*p)->rb_right;
315                 else
316                         WARN(1, "%s: buffer already found.", __func__);
317         }
318
319         rb_link_node(&handle->node, parent, p);
320         rb_insert_color(&handle->node, &client->handles);
321
322         return 0;
323 }
324
325 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
326                              unsigned int heap_id_mask,
327                              unsigned int flags)
328 {
329         struct ion_handle *handle;
330         struct ion_device *dev = client->dev;
331         struct ion_buffer *buffer = NULL;
332         struct ion_heap *heap;
333         int ret;
334
335         pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
336                  len, heap_id_mask, flags);
337         /*
338          * Traverse the list of heaps available in this system in priority
339          * order.  If the heap's id matches the mask requested by the
340          * caller, try to allocate from it.  Repeat until an allocation
341          * succeeds or all heaps have been tried.
342          */
343         len = PAGE_ALIGN(len);
344
345         if (!len)
346                 return ERR_PTR(-EINVAL);
347
348         down_read(&dev->lock);
349         plist_for_each_entry(heap, &dev->heaps, node) {
350                 /* if the caller didn't specify this heap id */
351                 if (!((1 << heap->id) & heap_id_mask))
352                         continue;
353                 buffer = ion_buffer_create(heap, dev, len, flags);
354                 if (!IS_ERR(buffer))
355                         break;
356         }
357         up_read(&dev->lock);
358
359         if (buffer == NULL)
360                 return ERR_PTR(-ENODEV);
361
362         if (IS_ERR(buffer))
363                 return ERR_CAST(buffer);
364
365         handle = ion_handle_create(client, buffer);
366
367         /*
368          * ion_buffer_create will create a buffer with a ref_cnt of 1,
369          * and ion_handle_create will take a second reference, drop one here
370          */
371         ion_buffer_put(buffer);
372
373         if (IS_ERR(handle))
374                 return handle;
375
376         mutex_lock(&client->lock);
377         ret = ion_handle_add(client, handle);
378         mutex_unlock(&client->lock);
379         if (ret) {
380                 ion_handle_put(handle);
381                 handle = ERR_PTR(ret);
382         }
383
384         return handle;
385 }
386 EXPORT_SYMBOL(ion_alloc);
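/*
 * Minimal in-kernel usage sketch (illustrative only; assumes a client
 * obtained from ion_client_create() and a heap that was registered with
 * id 0):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, 1 << 0, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 *
 * Note that heap_id_mask is a bitmask of heap ids as assigned by
 * ion_device_add_heap(), not of heap types.
 */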
387
388 void ion_free_nolock(struct ion_client *client,
389                      struct ion_handle *handle)
390 {
391         if (!ion_handle_validate(client, handle)) {
392                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
393                 return;
394         }
395         ion_handle_put_nolock(handle);
396 }
397
398 void ion_free(struct ion_client *client, struct ion_handle *handle)
399 {
400         BUG_ON(client != handle->client);
401
402         mutex_lock(&client->lock);
403         ion_free_nolock(client, handle);
404         mutex_unlock(&client->lock);
405 }
406 EXPORT_SYMBOL(ion_free);
407
408 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
409 {
410         void *vaddr;
411
412         if (buffer->kmap_cnt) {
413                 buffer->kmap_cnt++;
414                 return buffer->vaddr;
415         }
416         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
417         if (WARN_ONCE(vaddr == NULL,
418                       "heap->ops->map_kernel should return ERR_PTR on error"))
419                 return ERR_PTR(-EINVAL);
420         if (IS_ERR(vaddr))
421                 return vaddr;
422         buffer->vaddr = vaddr;
423         buffer->kmap_cnt++;
424         return vaddr;
425 }
426
427 static void *ion_handle_kmap_get(struct ion_handle *handle)
428 {
429         struct ion_buffer *buffer = handle->buffer;
430         void *vaddr;
431
432         if (handle->kmap_cnt) {
433                 handle->kmap_cnt++;
434                 return buffer->vaddr;
435         }
436         vaddr = ion_buffer_kmap_get(buffer);
437         if (IS_ERR(vaddr))
438                 return vaddr;
439         handle->kmap_cnt++;
440         return vaddr;
441 }
442
443 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
444 {
445         buffer->kmap_cnt--;
446         if (!buffer->kmap_cnt) {
447                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
448                 buffer->vaddr = NULL;
449         }
450 }
451
452 static void ion_handle_kmap_put(struct ion_handle *handle)
453 {
454         struct ion_buffer *buffer = handle->buffer;
455
456         if (!handle->kmap_cnt) {
457                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
458                 return;
459         }
460         handle->kmap_cnt--;
461         if (!handle->kmap_cnt)
462                 ion_buffer_kmap_put(buffer);
463 }
464
465 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
466 {
467         struct ion_buffer *buffer;
468         void *vaddr;
469
470         mutex_lock(&client->lock);
471         if (!ion_handle_validate(client, handle)) {
472                 pr_err("%s: invalid handle passed to map_kernel.\n",
473                        __func__);
474                 mutex_unlock(&client->lock);
475                 return ERR_PTR(-EINVAL);
476         }
477
478         buffer = handle->buffer;
479
480         if (!handle->buffer->heap->ops->map_kernel) {
481                 pr_err("%s: map_kernel is not implemented by this heap.\n",
482                        __func__);
483                 mutex_unlock(&client->lock);
484                 return ERR_PTR(-ENODEV);
485         }
486
487         mutex_lock(&buffer->lock);
488         vaddr = ion_handle_kmap_get(handle);
489         mutex_unlock(&buffer->lock);
490         mutex_unlock(&client->lock);
491         return vaddr;
492 }
493 EXPORT_SYMBOL(ion_map_kernel);
494
495 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
496 {
497         struct ion_buffer *buffer;
498
499         mutex_lock(&client->lock);
500         buffer = handle->buffer;
501         mutex_lock(&buffer->lock);
502         ion_handle_kmap_put(handle);
503         mutex_unlock(&buffer->lock);
504         mutex_unlock(&client->lock);
505 }
506 EXPORT_SYMBOL(ion_unmap_kernel);
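/*
 * Kernel mapping sketch (illustrative only; the heap must implement
 * ->map_kernel, otherwise ion_map_kernel() returns ERR_PTR(-ENODEV)):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, size);
 *	ion_unmap_kernel(client, handle);
 *
 * Mappings are reference counted per handle and per buffer, so the heap's
 * unmap_kernel() is only called once the last user has unmapped.
 */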
507
508 static struct mutex debugfs_mutex;
509 static struct rb_root *ion_root_client;
510 static int is_client_alive(struct ion_client *client)
511 {
512         struct rb_node *node;
513         struct ion_client *tmp;
514         struct ion_device *dev;
515
516         node = ion_root_client->rb_node;
517         dev = container_of(ion_root_client, struct ion_device, clients);
518
519         down_read(&dev->lock);
520         while (node) {
521                 tmp = rb_entry(node, struct ion_client, node);
522                 if (client < tmp) {
523                         node = node->rb_left;
524                 } else if (client > tmp) {
525                         node = node->rb_right;
526                 } else {
527                         up_read(&dev->lock);
528                         return 1;
529                 }
530         }
531
532         up_read(&dev->lock);
533         return 0;
534 }
535
536 static int ion_debug_client_show(struct seq_file *s, void *unused)
537 {
538         struct ion_client *client = s->private;
539         struct rb_node *n;
540         size_t sizes[ION_NUM_HEAP_IDS] = {0};
541         const char *names[ION_NUM_HEAP_IDS] = {NULL};
542         int i;
543
544         mutex_lock(&debugfs_mutex);
545         if (!is_client_alive(client)) {
546                 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
547                            client);
548                 mutex_unlock(&debugfs_mutex);
549                 return 0;
550         }
551
552         mutex_lock(&client->lock);
553         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
554                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
555                                                      node);
556                 unsigned int id = handle->buffer->heap->id;
557
558                 if (!names[id])
559                         names[id] = handle->buffer->heap->name;
560                 sizes[id] += handle->buffer->size;
561         }
562         mutex_unlock(&client->lock);
563         mutex_unlock(&debugfs_mutex);
564
565         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
566         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
567                 if (!names[i])
568                         continue;
569                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
570         }
571         return 0;
572 }
573
574 static int ion_debug_client_open(struct inode *inode, struct file *file)
575 {
576         return single_open(file, ion_debug_client_show, inode->i_private);
577 }
578
579 static const struct file_operations debug_client_fops = {
580         .open = ion_debug_client_open,
581         .read = seq_read,
582         .llseek = seq_lseek,
583         .release = single_release,
584 };
585
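/*
 * ion_get_client_serial() returns one more than the highest display_serial
 * already used by a client with the same name, so repeated clients named
 * e.g. "1000" show up in debugfs as "1000-0", "1000-1", "1000-2", ...
 */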
586 static int ion_get_client_serial(const struct rb_root *root,
587                                  const unsigned char *name)
588 {
589         int serial = -1;
590         struct rb_node *node;
591
592         for (node = rb_first(root); node; node = rb_next(node)) {
593                 struct ion_client *client = rb_entry(node, struct ion_client,
594                                                      node);
595
596                 if (strcmp(client->name, name))
597                         continue;
598                 serial = max(serial, client->display_serial);
599         }
600         return serial + 1;
601 }
602
603 struct ion_client *ion_client_create(struct ion_device *dev,
604                                      const char *name)
605 {
606         struct ion_client *client;
607         struct task_struct *task;
608         struct rb_node **p;
609         struct rb_node *parent = NULL;
610         struct ion_client *entry;
611         pid_t pid;
612
613         if (!name) {
614                 pr_err("%s: Name cannot be null\n", __func__);
615                 return ERR_PTR(-EINVAL);
616         }
617
618         get_task_struct(current->group_leader);
619         task_lock(current->group_leader);
620         pid = task_pid_nr(current->group_leader);
621         /*
622          * don't bother to store task struct for kernel threads,
623          * they can't be killed anyway
624          */
625         if (current->group_leader->flags & PF_KTHREAD) {
626                 put_task_struct(current->group_leader);
627                 task = NULL;
628         } else {
629                 task = current->group_leader;
630         }
631         task_unlock(current->group_leader);
632
633         client = kzalloc(sizeof(*client), GFP_KERNEL);
634         if (!client)
635                 goto err_put_task_struct;
636
637         client->dev = dev;
638         client->handles = RB_ROOT;
639         idr_init(&client->idr);
640         mutex_init(&client->lock);
641         client->task = task;
642         client->pid = pid;
643         client->name = kstrdup(name, GFP_KERNEL);
644         if (!client->name)
645                 goto err_free_client;
646
647         down_write(&dev->lock);
648         client->display_serial = ion_get_client_serial(&dev->clients, name);
649         client->display_name = kasprintf(
650                 GFP_KERNEL, "%s-%d", name, client->display_serial);
651         if (!client->display_name) {
652                 up_write(&dev->lock);
653                 goto err_free_client_name;
654         }
655         p = &dev->clients.rb_node;
656         while (*p) {
657                 parent = *p;
658                 entry = rb_entry(parent, struct ion_client, node);
659
660                 if (client < entry)
661                         p = &(*p)->rb_left;
662                 else if (client > entry)
663                         p = &(*p)->rb_right;
664         }
665         rb_link_node(&client->node, parent, p);
666         rb_insert_color(&client->node, &dev->clients);
667
668         client->debug_root = debugfs_create_file(client->display_name, 0664,
669                                                  dev->clients_debug_root,
670                                                  client, &debug_client_fops);
671         if (!client->debug_root) {
672                 char buf[256], *path;
673
674                 path = dentry_path(dev->clients_debug_root, buf, 256);
675                 pr_err("Failed to create client debugfs at %s/%s\n",
676                        path, client->display_name);
677         }
678
679         up_write(&dev->lock);
680
681         return client;
682
683 err_free_client_name:
684         kfree(client->name);
685 err_free_client:
686         kfree(client);
687 err_put_task_struct:
688         if (task)
689                 put_task_struct(current->group_leader);
690         return ERR_PTR(-ENOMEM);
691 }
692 EXPORT_SYMBOL(ion_client_create);
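/*
 * Client lifecycle sketch (illustrative only; dev is the struct ion_device
 * and "my-driver" an arbitrary debug name):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(dev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 *
 * Userspace gets an equivalent client implicitly when it opens /dev/ion;
 * that client is named after the opener's pid (see ion_open() below).
 */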
693
694 void ion_client_destroy(struct ion_client *client)
695 {
696         struct ion_device *dev = client->dev;
697         struct rb_node *n;
698
699         pr_debug("%s: %d\n", __func__, __LINE__);
700         mutex_lock(&debugfs_mutex);
701         while ((n = rb_first(&client->handles))) {
702                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
703                                                      node);
704                 ion_handle_destroy(&handle->ref);
705         }
706
707         idr_destroy(&client->idr);
708
709         down_write(&dev->lock);
710         if (client->task)
711                 put_task_struct(client->task);
712         rb_erase(&client->node, &dev->clients);
713         debugfs_remove_recursive(client->debug_root);
714         up_write(&dev->lock);
715
716         kfree(client->display_name);
717         kfree(client->name);
718         kfree(client);
719         mutex_unlock(&debugfs_mutex);
720 }
721 EXPORT_SYMBOL(ion_client_destroy);
722
723 static struct sg_table *dup_sg_table(struct sg_table *table)
724 {
725         struct sg_table *new_table;
726         int ret, i;
727         struct scatterlist *sg, *new_sg;
728
729         new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
730         if (!new_table)
731                 return ERR_PTR(-ENOMEM);
732
733         ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
734         if (ret) {
735                 kfree(new_table);
736                 return ERR_PTR(-ENOMEM);
737         }
738
739         new_sg = new_table->sgl;
740         for_each_sg(table->sgl, sg, table->nents, i) {
741                 memcpy(new_sg, sg, sizeof(*sg));
742                 new_sg->dma_address = 0;
743                 new_sg = sg_next(new_sg);
744         }
745
746         return new_table;
747 }
748
749 static void free_duped_table(struct sg_table *table)
750 {
751         sg_free_table(table);
752         kfree(table);
753 }
754
755 struct ion_dma_buf_attachment {
756         struct device *dev;
757         struct sg_table *table;
758         struct list_head list;
759 };
760
761 static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
762                                 struct dma_buf_attachment *attachment)
763 {
764         struct ion_dma_buf_attachment *a;
765         struct sg_table *table;
766         struct ion_buffer *buffer = dmabuf->priv;
767
768         a = kzalloc(sizeof(*a), GFP_KERNEL);
769         if (!a)
770                 return -ENOMEM;
771
772         table = dup_sg_table(buffer->sg_table);
773         if (IS_ERR(table)) {
774                 kfree(a);
775                 return -ENOMEM;
776         }
777
778         a->table = table;
779         a->dev = dev;
780         INIT_LIST_HEAD(&a->list);
781
782         attachment->priv = a;
783
784         mutex_lock(&buffer->lock);
785         list_add(&a->list, &buffer->attachments);
786         mutex_unlock(&buffer->lock);
787
788         return 0;
789 }
790
791 static void ion_dma_buf_detach(struct dma_buf *dmabuf,
792                                 struct dma_buf_attachment *attachment)
793 {
794         struct ion_dma_buf_attachment *a = attachment->priv;
795         struct ion_buffer *buffer = dmabuf->priv;
796
797         free_duped_table(a->table);
798         mutex_lock(&buffer->lock);
799         list_del(&a->list);
800         mutex_unlock(&buffer->lock);
801
802         kfree(a);
803 }
804
805
806 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
807                                         enum dma_data_direction direction)
808 {
809         struct ion_dma_buf_attachment *a = attachment->priv;
810         struct sg_table *table;
811         int ret;
812
813         table = a->table;
814
815         if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
816                         direction)) {
817                 ret = -ENOMEM;
818                 goto err;
819         }
820         return table;
821
822 err:
823         free_duped_table(table);
824         return ERR_PTR(ret);
825 }
826
827 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
828                               struct sg_table *table,
829                               enum dma_data_direction direction)
830 {
831         dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
832 }
833
834 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
835 {
836         struct ion_buffer *buffer = dmabuf->priv;
837         int ret = 0;
838
839         if (!buffer->heap->ops->map_user) {
840                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
841                        __func__);
842                 return -EINVAL;
843         }
844
845         if (!(buffer->flags & ION_FLAG_CACHED))
846                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
847
848         mutex_lock(&buffer->lock);
849         /* now map it to userspace */
850         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
851         mutex_unlock(&buffer->lock);
852
853         if (ret)
854                 pr_err("%s: failure mapping buffer to userspace\n",
855                        __func__);
856
857         return ret;
858 }
859
860 static void ion_dma_buf_release(struct dma_buf *dmabuf)
861 {
862         struct ion_buffer *buffer = dmabuf->priv;
863
864         ion_buffer_put(buffer);
865 }
866
867 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
868 {
869         struct ion_buffer *buffer = dmabuf->priv;
870
871         return buffer->vaddr + offset * PAGE_SIZE;
872 }
873
874 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
875                                void *ptr)
876 {
877 }
878
879 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
880                                         enum dma_data_direction direction)
881 {
882         struct ion_buffer *buffer = dmabuf->priv;
883         void *vaddr;
884         struct ion_dma_buf_attachment *a;
885
886         /*
887          * TODO: Move this elsewhere because we don't always need a vaddr
888          */
889         if (buffer->heap->ops->map_kernel) {
890                 mutex_lock(&buffer->lock);
891                 vaddr = ion_buffer_kmap_get(buffer);
892                 mutex_unlock(&buffer->lock);
893         }
894
895
896         mutex_lock(&buffer->lock);
897         list_for_each_entry(a, &buffer->attachments, list) {
898                 dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
899                                         DMA_BIDIRECTIONAL);
900         }
901         mutex_unlock(&buffer->lock);
902
903         return 0;
904 }
905
906 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
907                                       enum dma_data_direction direction)
908 {
909         struct ion_buffer *buffer = dmabuf->priv;
910         struct ion_dma_buf_attachment *a;
911
912         if (buffer->heap->ops->map_kernel) {
913                 mutex_lock(&buffer->lock);
914                 ion_buffer_kmap_put(buffer);
915                 mutex_unlock(&buffer->lock);
916         }
917
918         mutex_lock(&buffer->lock);
919         list_for_each_entry(a, &buffer->attachments, list) {
920                 dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
921                                         DMA_BIDIRECTIONAL);
922         }
923         mutex_unlock(&buffer->lock);
924
925         return 0;
926 }
927
928 static const struct dma_buf_ops dma_buf_ops = {
929         .map_dma_buf = ion_map_dma_buf,
930         .unmap_dma_buf = ion_unmap_dma_buf,
931         .mmap = ion_mmap,
932         .release = ion_dma_buf_release,
933         .attach = ion_dma_buf_attach,
934         .detach = ion_dma_buf_detach,
935         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
936         .end_cpu_access = ion_dma_buf_end_cpu_access,
937         .kmap_atomic = ion_dma_buf_kmap,
938         .kunmap_atomic = ion_dma_buf_kunmap,
939         .kmap = ion_dma_buf_kmap,
940         .kunmap = ion_dma_buf_kunmap,
941 };
942
943 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
944                                   struct ion_handle *handle)
945 {
946         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
947         struct ion_buffer *buffer;
948         struct dma_buf *dmabuf;
949         bool valid_handle;
950
951         mutex_lock(&client->lock);
952         valid_handle = ion_handle_validate(client, handle);
953         if (!valid_handle) {
954                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
955                 mutex_unlock(&client->lock);
956                 return ERR_PTR(-EINVAL);
957         }
958         buffer = handle->buffer;
959         ion_buffer_get(buffer);
960         mutex_unlock(&client->lock);
961
962         exp_info.ops = &dma_buf_ops;
963         exp_info.size = buffer->size;
964         exp_info.flags = O_RDWR;
965         exp_info.priv = buffer;
966
967         dmabuf = dma_buf_export(&exp_info);
968         if (IS_ERR(dmabuf)) {
969                 ion_buffer_put(buffer);
970                 return dmabuf;
971         }
972
973         return dmabuf;
974 }
975 EXPORT_SYMBOL(ion_share_dma_buf);
976
977 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
978 {
979         struct dma_buf *dmabuf;
980         int fd;
981
982         dmabuf = ion_share_dma_buf(client, handle);
983         if (IS_ERR(dmabuf))
984                 return PTR_ERR(dmabuf);
985
986         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
987         if (fd < 0)
988                 dma_buf_put(dmabuf);
989
990         return fd;
991 }
992 EXPORT_SYMBOL(ion_share_dma_buf_fd);
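/*
 * Sharing sketch (illustrative only): export a buffer as a dma-buf fd that
 * can be passed to other processes or drivers.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *
 * The fd holds its own reference on the underlying ion_buffer, so the
 * handle may be freed while the fd is alive; the buffer is finally
 * released from ion_dma_buf_release().
 */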
993
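/*
 * Heap enumeration protocol: userspace typically issues the query twice,
 * first with heaps == 0 to learn how many heaps exist, then again with a
 * buffer large enough for that many ion_heap_data records.  Illustrative
 * userspace sketch (assuming the ION_IOC_HEAP_QUERY ioctl from the uapi
 * header and ion_fd from open("/dev/ion")):
 *
 *	struct ion_heap_query query = { 0 };
 *
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 *	query.heaps = (uintptr_t)calloc(query.cnt, sizeof(struct ion_heap_data));
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 */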
994 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
995 {
996         struct ion_device *dev = client->dev;
997         struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
998         int ret = -EINVAL, cnt = 0, max_cnt;
999         struct ion_heap *heap;
1000         struct ion_heap_data hdata;
1001
1002         memset(&hdata, 0, sizeof(hdata));
1003
1004         down_read(&dev->lock);
1005         if (!buffer) {
1006                 query->cnt = dev->heap_cnt;
1007                 ret = 0;
1008                 goto out;
1009         }
1010
1011         if (query->cnt <= 0)
1012                 goto out;
1013
1014         max_cnt = query->cnt;
1015
1016         plist_for_each_entry(heap, &dev->heaps, node) {
1017                 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1018                 hdata.name[sizeof(hdata.name) - 1] = '\0';
1019                 hdata.type = heap->type;
1020                 hdata.heap_id = heap->id;
1021
1022                 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1023                         ret = -EFAULT;
1024                         goto out;
1025                 }
1026
1027                 cnt++;
1028                 if (cnt >= max_cnt)
1029                         break;
1030         }
1031
1032         query->cnt = cnt;
1033 out:
1034         up_read(&dev->lock);
1035         return ret;
1036 }
1037
1038 static int ion_release(struct inode *inode, struct file *file)
1039 {
1040         struct ion_client *client = file->private_data;
1041
1042         pr_debug("%s: %d\n", __func__, __LINE__);
1043         ion_client_destroy(client);
1044         return 0;
1045 }
1046
1047 static int ion_open(struct inode *inode, struct file *file)
1048 {
1049         struct miscdevice *miscdev = file->private_data;
1050         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1051         struct ion_client *client;
1052         char debug_name[64];
1053
1054         pr_debug("%s: %d\n", __func__, __LINE__);
1055         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1056         client = ion_client_create(dev, debug_name);
1057         if (IS_ERR(client))
1058                 return PTR_ERR(client);
1059         file->private_data = client;
1060
1061         return 0;
1062 }
1063
1064 static const struct file_operations ion_fops = {
1065         .owner          = THIS_MODULE,
1066         .open           = ion_open,
1067         .release        = ion_release,
1068         .unlocked_ioctl = ion_ioctl,
1069 #ifdef CONFIG_COMPAT
1070         .compat_ioctl   = ion_ioctl,
1071 #endif
1072 };
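/*
 * Each open() of the misc device creates a fresh ion_client named after the
 * opener's pid; all ioctls (dispatched via ion_ioctl() in ion-ioctl.c)
 * operate on that per-fd client, which is torn down on the final close().
 */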
1073
1074 static size_t ion_debug_heap_total(struct ion_client *client,
1075                                    unsigned int id)
1076 {
1077         size_t size = 0;
1078         struct rb_node *n;
1079
1080         mutex_lock(&client->lock);
1081         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1082                 struct ion_handle *handle = rb_entry(n,
1083                                                      struct ion_handle,
1084                                                      node);
1085                 if (handle->buffer->heap->id == id)
1086                         size += handle->buffer->size;
1087         }
1088         mutex_unlock(&client->lock);
1089         return size;
1090 }
1091
1092 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1093 {
1094         struct ion_heap *heap = s->private;
1095         struct ion_device *dev = heap->dev;
1096         struct rb_node *n;
1097         size_t total_size = 0;
1098         size_t total_orphaned_size = 0;
1099
1100         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1101         seq_puts(s, "----------------------------------------------------\n");
1102
1103         mutex_lock(&debugfs_mutex);
1104         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1105                 struct ion_client *client = rb_entry(n, struct ion_client,
1106                                                      node);
1107                 size_t size = ion_debug_heap_total(client, heap->id);
1108
1109                 if (!size)
1110                         continue;
1111                 if (client->task) {
1112                         char task_comm[TASK_COMM_LEN];
1113
1114                         get_task_comm(task_comm, client->task);
1115                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1116                                    client->pid, size);
1117                 } else {
1118                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1119                                    client->pid, size);
1120                 }
1121         }
1122         mutex_unlock(&debugfs_mutex);
1123
1124         seq_puts(s, "----------------------------------------------------\n");
1125         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1126         mutex_lock(&dev->buffer_lock);
1127         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1128                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1129                                                      node);
1130                 if (buffer->heap->id != heap->id)
1131                         continue;
1132                 total_size += buffer->size;
1133                 if (!buffer->handle_count) {
1134                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1135                                    buffer->task_comm, buffer->pid,
1136                                    buffer->size, buffer->kmap_cnt,
1137                                    kref_read(&buffer->ref));
1138                         total_orphaned_size += buffer->size;
1139                 }
1140         }
1141         mutex_unlock(&dev->buffer_lock);
1142         seq_puts(s, "----------------------------------------------------\n");
1143         seq_printf(s, "%16s %16zu\n", "total orphaned",
1144                    total_orphaned_size);
1145         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1146         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1147                 seq_printf(s, "%16s %16zu\n", "deferred free",
1148                            heap->free_list_size);
1149         seq_puts(s, "----------------------------------------------------\n");
1150
1151         if (heap->debug_show)
1152                 heap->debug_show(heap, s, unused);
1153
1154         return 0;
1155 }
1156
1157 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1158 {
1159         return single_open(file, ion_debug_heap_show, inode->i_private);
1160 }
1161
1162 static const struct file_operations debug_heap_fops = {
1163         .open = ion_debug_heap_open,
1164         .read = seq_read,
1165         .llseek = seq_lseek,
1166         .release = single_release,
1167 };
1168
1169 static int debug_shrink_set(void *data, u64 val)
1170 {
1171         struct ion_heap *heap = data;
1172         struct shrink_control sc;
1173         int objs;
1174
1175         sc.gfp_mask = GFP_HIGHUSER;
1176         sc.nr_to_scan = val;
1177
1178         if (!val) {
1179                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1180                 sc.nr_to_scan = objs;
1181         }
1182
1183         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1184         return 0;
1185 }
1186
1187 static int debug_shrink_get(void *data, u64 *val)
1188 {
1189         struct ion_heap *heap = data;
1190         struct shrink_control sc;
1191         int objs;
1192
1193         sc.gfp_mask = GFP_HIGHUSER;
1194         sc.nr_to_scan = 0;
1195
1196         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1197         *val = objs;
1198         return 0;
1199 }
1200
1201 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1202                         debug_shrink_set, "%llu\n");
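/*
 * The resulting "<heap name>_shrink" debugfs file lets the heap shrinker be
 * driven by hand: reading it reports how many objects the shrinker can
 * currently free, writing N scans up to N objects, and writing 0 drains
 * everything the shrinker counts (e.g. "echo 0 > system_shrink").
 */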
1203
1204 void ion_device_add_heap(struct ion_heap *heap)
1205 {
1206         struct dentry *debug_file;
1207         struct ion_device *dev = internal_dev;
1208
1209         if (!heap->ops->allocate || !heap->ops->free)
1210                 pr_err("%s: can not add heap with invalid ops struct.\n",
1211                        __func__);
1212
1213         spin_lock_init(&heap->free_lock);
1214         heap->free_list_size = 0;
1215
1216         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1217                 ion_heap_init_deferred_free(heap);
1218
1219         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1220                 ion_heap_init_shrinker(heap);
1221
1222         heap->dev = dev;
1223         down_write(&dev->lock);
1224         heap->id = heap_id++;
1225         /*
1226          * Use a negative heap->id to reverse the priority: when traversing
1227          * the list later, higher id numbers are attempted first.
1228          */
1229         plist_node_init(&heap->node, -heap->id);
1230         plist_add(&heap->node, &dev->heaps);
1231         debug_file = debugfs_create_file(heap->name, 0664,
1232                                          dev->heaps_debug_root, heap,
1233                                          &debug_heap_fops);
1234
1235         if (!debug_file) {
1236                 char buf[256], *path;
1237
1238                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1239                 pr_err("Failed to create heap debugfs at %s/%s\n",
1240                        path, heap->name);
1241         }
1242
1243         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1244                 char debug_name[64];
1245
1246                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1247                 debug_file = debugfs_create_file(
1248                         debug_name, 0644, dev->heaps_debug_root, heap,
1249                         &debug_shrink_fops);
1250                 if (!debug_file) {
1251                         char buf[256], *path;
1252
1253                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1254                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1255                                path, debug_name);
1256                 }
1257         }
1258
1259         dev->heap_cnt++;
1260         up_write(&dev->lock);
1261 }
1262 EXPORT_SYMBOL(ion_device_add_heap);
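/*
 * Heap registration sketch (illustrative only): with the reworked
 * enumeration scheme a heap driver creates its heap and registers it
 * against the single internal ion_device; ids are handed out in
 * registration order, and later-registered heaps (higher ids) are tried
 * first by ion_alloc().
 *
 *	static int __init my_heap_init(void)
 *	{
 *		struct ion_heap *heap = my_heap_create();
 *
 *		if (IS_ERR(heap))
 *			return PTR_ERR(heap);
 *		ion_device_add_heap(heap);
 *		return 0;
 *	}
 *	device_initcall(my_heap_init);
 *
 * my_heap_create() stands in for a real constructor such as the system
 * heap's create routine.
 */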
1263
1264 int ion_device_create(void)
1265 {
1266         struct ion_device *idev;
1267         int ret;
1268
1269         idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1270         if (!idev)
1271                 return -ENOMEM;
1272
1273         idev->dev.minor = MISC_DYNAMIC_MINOR;
1274         idev->dev.name = "ion";
1275         idev->dev.fops = &ion_fops;
1276         idev->dev.parent = NULL;
1277         ret = misc_register(&idev->dev);
1278         if (ret) {
1279                 pr_err("ion: failed to register misc device.\n");
1280                 kfree(idev);
1281                 return ret;
1282         }
1283
1284         idev->debug_root = debugfs_create_dir("ion", NULL);
1285         if (!idev->debug_root) {
1286                 pr_err("ion: failed to create debugfs root directory.\n");
1287                 goto debugfs_done;
1288         }
1289         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1290         if (!idev->heaps_debug_root) {
1291                 pr_err("ion: failed to create debugfs heaps directory.\n");
1292                 goto debugfs_done;
1293         }
1294         idev->clients_debug_root = debugfs_create_dir("clients",
1295                                                 idev->debug_root);
1296         if (!idev->clients_debug_root)
1297                 pr_err("ion: failed to create debugfs clients directory.\n");
1298
1299 debugfs_done:
1300         idev->buffers = RB_ROOT;
1301         mutex_init(&idev->buffer_lock);
1302         init_rwsem(&idev->lock);
1303         plist_head_init(&idev->heaps);
1304         idev->clients = RB_ROOT;
1305         ion_root_client = &idev->clients;
1306         mutex_init(&debugfs_mutex);
1307         internal_dev = idev;
1308         return 0;
1309 }
1310 subsys_initcall(ion_device_create);